{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "_pIZ3ZXNp7cf" }, "source": [ "Welcome to Tortoise! 🐒🐒🐒🐒\n", "\n", "Before you begin, I **strongly** recommend you turn on a GPU runtime.\n", "\n", "There's a reason this is called \"Tortoise\" - this model takes up to a minute to perform inference for a single sentence on a GPU. Expect waits on the order of hours on a CPU." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "JrK20I32grP6" }, "outputs": [], "source": [ "#first follow the instructions in the README.md file under Local Installation\n", "!pip3 install -r requirements.txt\n", "# !python3 setup.py install" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "id": "Gen09NM4hONQ" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/manmay/anaconda3/envs/tortoise/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[2023-07-15 10:55:28,559] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed info: version=0.8.3, git-hash=unknown, git-branch=unknown\n", "[2023-07-15 10:55:28,603] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter mp_size is deprecated use tensor_parallel.tp_size instead\n", "[2023-07-15 10:55:28,605] [INFO] [logging.py:93:log_dist] [Rank -1] quantize_bits = 8 mlp_extra_grouping = False, quantize_groups = 1\n", "WARNING! Setting BLOOMLayerPolicy._orig_layer_class to None due to Exception: module 'transformers.models' has no attribute 'bloom'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Using /home/manmay/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...\n", "Detected CUDA files, patching ldflags\n", "Emitting ninja build file /home/manmay/.cache/torch_extensions/py39_cu117/transformer_inference/build.ninja...\n", "Building extension module transformer_inference...\n", "Allowing ninja to set a default number of workers... 
"Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)\n", "Loading extension module transformer_inference...\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "ninja: no work to do.\n", "Time to load transformer_inference op: 0.9313881397247314 seconds\n", "[2023-07-15 10:55:34,938] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed-Inference config: {'layer_id': 0, 'hidden_size': 1024, 'intermediate_size': 4096, 'heads': 16, 'num_hidden_layers': -1, 'fp16': False, 'pre_layer_norm': True, 'local_rank': -1, 'stochastic_mode': False, 'epsilon': 1e-05, 'mp_size': 1, 'q_int8': False, 'scale_attention': True, 'triangular_masking': True, 'local_attention': False, 'window_size': 1, 'rotary_dim': -1, 'rotate_half': False, 'rotate_every_two': True, 'return_tuple': True, 'mlp_after_attn': True, 'mlp_act_func_type': , 'specialized_mode': False, 'training_mp_size': 1, 'bigscience_bloom': False, 'max_out_tokens': 1024, 'scale_attn_by_inverse_layer_idx': False, 'enable_qkv_quantization': False, 'use_mup': False, 'return_single_tuple': False}\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Using /home/manmay/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...\n", "No modifications detected for re-loaded extension module transformer_inference, skipping build step...\n", "Loading extension module transformer_inference...\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Time to load transformer_inference op: 0.15709829330444336 seconds\n" ] } ], "source": [ "# Imports used through the rest of the notebook.\n", "import torch\n", "import torchaudio\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "\n", "import IPython\n", "\n", "from tortoise.api import TextToSpeech\n", "from tortoise.utils.audio import load_audio, load_voice, load_voices\n", "\n", "# This will download all the models used by Tortoise from the HF hub.\n", "# tts = TextToSpeech()\n", "# If you want to use DeepSpeed, pass use_deepspeed=True; inference is nearly 2x faster than without it.\n", "tts = TextToSpeech(use_deepspeed=True, kv_cache=True)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "id": "bt_aoxONjfL2" }, "outputs": [], "source": [ "# This is the text that will be spoken.\n", "text = \"Joining two modalities results in a surprising increase in generalization! What would happen if we combined them all?\"\n", "\n", "# Here's something for the poetically inclined. (To use it, set text to the string below.)\n", "\"\"\"\n", "Then took the other, as just as fair,\n", "And having perhaps the better claim,\n", "Because it was grassy and wanted wear;\n", "Though as for that the passing there\n", "Had worn them really about the same,\"\"\"\n", "\n", "# Pick a \"preset mode\" to determine quality. Options: {\"ultra_fast\", \"fast\" (default), \"standard\", \"high_quality\"}. See the docs in api.py.\n", "preset = \"ultra_fast\"" ] },
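{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# A minimal sketch of tuning a preset: tts_with_preset() updates the chosen\n", "# preset's settings dict with any extra kwargs before calling tts(), so\n", "# parameters from the tts() signature (e.g. num_autoregressive_samples,\n", "# diffusion_iterations) can be overridden directly. The values below are\n", "# illustrative assumptions, not recommended settings; uncomment to try it\n", "# once a voice is loaded further down.\n", "# gen = tts.tts_with_preset(text, voice_samples=voice_samples,\n", "#                           conditioning_latents=conditioning_latents,\n", "#                           preset=\"fast\",\n", "#                           num_autoregressive_samples=32,\n", "#                           diffusion_iterations=50)" ] },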
{ "cell_type": "code", "execution_count": 3, "metadata": { "id": "SSleVnRAiEE2" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[0m\u001b[01;34mangie\u001b[0m/ \u001b[01;34mfreeman\u001b[0m/ \u001b[01;34mmyself\u001b[0m/ \u001b[01;34mtom\u001b[0m/ \u001b[01;34mtrain_grace\u001b[0m/\n", "\u001b[01;34mapplejack\u001b[0m/ \u001b[01;34mgeralt\u001b[0m/ \u001b[01;34mpat\u001b[0m/ \u001b[01;34mtrain_atkins\u001b[0m/ \u001b[01;34mtrain_kennard\u001b[0m/\n", "\u001b[01;34mcond_latent_example\u001b[0m/ \u001b[01;34mhalle\u001b[0m/ \u001b[01;34mpat2\u001b[0m/ \u001b[01;34mtrain_daws\u001b[0m/ \u001b[01;34mtrain_lescault\u001b[0m/\n", "\u001b[01;34mdaniel\u001b[0m/ \u001b[01;34mjlaw\u001b[0m/ \u001b[01;34mrainbow\u001b[0m/ \u001b[01;34mtrain_dotrice\u001b[0m/ \u001b[01;34mtrain_mouse\u001b[0m/\n", "\u001b[01;34mdeniro\u001b[0m/ \u001b[01;34mlj\u001b[0m/ \u001b[01;34msnakes\u001b[0m/ \u001b[01;34mtrain_dreams\u001b[0m/ \u001b[01;34mweaver\u001b[0m/\n", "\u001b[01;34memma\u001b[0m/ \u001b[01;34mmol\u001b[0m/ \u001b[01;34mtim_reynolds\u001b[0m/ \u001b[01;34mtrain_empire\u001b[0m/ \u001b[01;34mwilliam\u001b[0m/\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Tortoise will attempt to mimic voices you provide. It comes pre-packaged\n", "# with some voices you might recognize.\n", "\n", "# Let's list all the voices available. These are just some random clips I've gathered\n", "# from the internet as well as a few voices from the training dataset.\n", "# Feel free to add your own clips to the voices/ folder.\n", "%ls tortoise/voices\n", "\n", "IPython.display.Audio('tortoise/voices/tom/1.wav')" ] },
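{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# A sketch of supplying your own voice. The simplest route is to add a folder of\n", "# short WAV clips under tortoise/voices/ and load it by name with load_voice().\n", "# You can also load clips from arbitrary paths with load_audio (imported above);\n", "# the file names below are placeholders, and 22050 Hz is assumed to be the\n", "# sample rate Tortoise expects for conditioning clips.\n", "# custom_clips = [load_audio(p, 22050) for p in ['clip1.wav', 'clip2.wav']]\n", "# gen = tts.tts_with_preset(text, voice_samples=custom_clips,\n", "#                           conditioning_latents=None, preset=preset)" ] },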
{ "cell_type": "code", "execution_count": 4, "metadata": { "id": "KEXOKjIvn6NW" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Generating autoregressive samples..\n" ] } ], "source": [ "# Pick one of the voices from the output above.\n", "voice = 'tom'\n", "\n", "# Load it and send it through Tortoise.\n", "voice_samples, conditioning_latents = load_voice(voice)\n", "gen = tts.tts_with_preset(text, voice_samples=voice_samples, conditioning_latents=conditioning_latents,\n", "                          preset=preset)\n", "torchaudio.save('generated.wav', gen.squeeze(0).cpu(), 24000)\n", "IPython.display.Audio('generated.wav')" ] },
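{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# A sketch of asking for several candidate takes in one call. k appears in the\n", "# tts() signature; it is assumed to control how many clips are returned, with\n", "# k > 1 yielding a list of tensors rather than a single tensor.\n", "# gen = tts.tts_with_preset(text, voice_samples=voice_samples,\n", "#                           conditioning_latents=conditioning_latents,\n", "#                           preset=preset, k=3)\n", "# for i, g in enumerate(gen):\n", "#     torchaudio.save(f'generated_{i}.wav', g.squeeze(0).cpu(), 24000)" ] },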
{ "cell_type": "code", "execution_count": null, "metadata": { "id": "16Xs2SSC3BXa" }, "outputs": [], "source": [ "# Tortoise can also generate speech using a random voice. The voice changes each time you execute this!\n", "# (Note: random voices can be prone to strange utterances)\n", "gen = tts.tts_with_preset(text, voice_samples=None, conditioning_latents=None, preset=preset)\n", "torchaudio.save('generated.wav', gen.squeeze(0).cpu(), 24000)\n", "IPython.display.Audio('generated.wav')" ] },
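{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# If you hit a random voice you like, simply re-running won't reproduce it. The\n", "# tts() signature exposes use_deterministic_seed; passing a fixed integer is\n", "# assumed to make sampling repeatable across runs. The seed value here is arbitrary.\n", "# gen = tts.tts_with_preset(text, voice_samples=None, conditioning_latents=None,\n", "#                           preset=preset, use_deterministic_seed=42)\n", "# torchaudio.save('generated_seeded.wav', gen.squeeze(0).cpu(), 24000)" ] },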
{ "cell_type": "code", "execution_count": null, "metadata": { "id": "fYTk8KUezUr5" }, "outputs": [], "source": [ "# You can also combine conditioning voices. Combining voices produces a new voice\n", "# with traits from all the parents.\n", "#\n", "# Let's see what it would sound like if Picard and Kirk had a kid with a penchant for philosophy:\n", "voice_samples, conditioning_latents = load_voices(['pat', 'william'])\n", "\n", "gen = tts.tts_with_preset(\"They used to say that if man was meant to fly, he’d have wings. But he did fly. He discovered he had to.\",\n", "                          voice_samples=voice_samples, conditioning_latents=conditioning_latents, preset=preset)\n", "torchaudio.save('captain_kirkard.wav', gen.squeeze(0).cpu(), 24000)\n", "IPython.display.Audio('captain_kirkard.wav')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "t66yqWgu68KL" }, "outputs": [], "source": [ "del tts  # Will break other cells, but necessary to conserve RAM if you want to run this cell.\n", "\n", "# Tortoise comes with some scripts that do a lot of the lifting for you. For example,\n", "# read.py will read a text file for you.\n", "!python3 tortoise/read.py --voice=train_atkins --textfile=tortoise/data/riding_hood.txt --preset=ultra_fast --output_path=.\n", "\n", "IPython.display.Audio('train_atkins/combined.wav')\n", "# This will take a while..." ] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "name": "tortoise-tts.ipynb", "provenance": [] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 4 }