{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.7.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"\n#第一个代码单元格,可以根据自己的需求进行更改\nuseNgrok=True # 非必填 是否使用ngrok作为公网访问地址\nngrokTokenFile='/kaggle/input/ngroktoken/Authtoken.txt' # 非必填 存放ngrokToken的文件的路径\ndisableShared=True # 关闭默认的gradio.live穿透,如果没有配置ngrok或frp不要开启这个选项\n#双开设置\nuse2=True#是否开启两个webui\nngrokTokenFile1='/kaggle/input/ngroktoken/Authtoken1.txt' # 非必填 存放ngrokToken的文件的路径\ndisableShared1=True # 关闭默认的gradio.live穿透,如果没有配置ngrok或frp不要开启这个选项\n#第二个webui使用的模型\nusedCkpt1 = '9527.safetensors'\n# 模型目录 \n# Stable Diffusion模型请放在这里(不用填模型的文件名,只填模型的目录即可)\nmodelDirs = ['/kaggle/input/cetus-mix/',\n #'/kaggle/input/aom3ackpt',\n #'/kaggle/input/lora-9527-counterfeit',\n '/kaggle/input/dalcefo-painting',\n '/kaggle/input/9527-fp16'\n ]\n# 启动时默认加载的模型名称 可填全路径或者模型名称,名称建议带上文件名后缀\n# 如果你想加载7G以上的大模型,请填这个来加载,直接在Webui里切换模型容易爆内存\nusedCkpt = 'cetusMix.safetensors'\n# Hypernetworks超网络模型路径请放在这里:\nhypernetworksModelDirs = []\n# embeddings(pt文件)请放在这里:\nembeddingsModelDirs = ['/kaggle/input/1embeddings/embeddings',\n '/kaggle/input/1embeddings/embeddings/person'\n ] \n# Lora模型的数据集路径请写在这里:\nloraModelDirs = ['/kaggle/input/loras-bocchi-the-rock',\n '/kaggle/input/lora-1',\n '/kaggle/input/gachalora',\n '/kaggle/input/dksss-lora/lora',\n '/kaggle/input/dksss-lora/standingFullBodyWithBackgroundStyle_v10Offset'\n ] \n# ControlNet模型请放在这里:\nControlnet = ['/kaggle/input/controlnet-ext',\n '/kaggle/input/my-controlnet'\n #'/kaggle/input/t2i-adapters'\n ]\n\n# 插件列表: git仓库地址\n# 开启公网访问后将不能在webui界面安装插件,如果有需要安装的插件,把地址加入到下面\n# 不需要的插件在前面加 # ,插件地址之间需要用英语逗号隔开\nextensions = [\n 'https://github.com/Elldreth/loopback_scaler',\n 'https://github.com/jexom/sd-webui-depth-lib.git',\n 'https://github.com/AlUlkesh/stable-diffusion-webui-images-browser',\n 'https://github.com/camenduru/sd-civitai-browser',\n #'https://github.com/kohya-ss/sd-webui-additional-networks ',\n 'https://github.com/Mikubill/sd-webui-controlnet',\n 'https://github.com/camenduru/openpose-editor',\n 'https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN',\n 'https://github.com/opparco/stable-diffusion-webui-two-shot',\n #'https://github.com/minicacas/stable-diffusion-webui-composable-lora',\n 'https://github.com/DominikDoom/a1111-sd-webui-tagcomplete',\n #'https://github.com/hnmr293/posex',\n 'https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111',\n 'https://github.com/KohakuBlueleaf/a1111-sd-webui-locon',\n 'https://github.com/hnmr293/sd-webui-cutoff',\n 'https://github.com/hako-mikan/sd-webui-lora-block-weight',\n 'https://github.com/butaixianran/Stable-Diffusion-Webui-Civitai-Helper.git',\n #'https://github.com/Elldreth/loopback_scaler',\n 'https://gitcode.net/ranting8323/sd-webui-model-converter.git',\n 'https://github.com/catppuccin/stable-diffusion-webui.git'\n]\n#清理和打包生成的图片\nzip_output=True\nclear_output=True\nclear_output=False\n#zip_output=False\n# 安装目录 如果安装目录在输出目录(/kaggle/working)下,建议不要开启 Save Version 功能,因为每次结束运行都需要等待上传全部文件\ninstall_path=\"/kaggle/working\" \n\n# 配置启动参数\nvaeHalf=True # vae开启半精度,关闭效果更好,对速度没啥影响\nmodelHalf=True # 模型开启半精度,关闭效果更好,但生成速度减半\nconsoleProgressbars=False # 控制台显示进度条,关闭可以减少一些输出内容,查看日志时更快一点\nconsolePrompts=False # 同上 \nenableLoadByCopy=False # 是否使用copy的方式加载文件 启动变慢,且测试后没有提高模型切换速度\n# 重置变量 
# Reset flag: setting reLoad = True forces a reinstall after the settings above are changed\nreLoad = True\n# Comment out the next line if you do need a reinstall\nreLoad = False","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:02.980228Z","iopub.execute_input":"2023-04-21T12:49:02.980800Z","iopub.status.idle":"2023-04-21T12:49:03.001604Z","shell.execute_reply.started":"2023-04-21T12:49:02.980676Z","shell.execute_reply":"2023-04-21T12:49:02.998071Z"},"trusted":true},"execution_count":107,"outputs":[]},{"cell_type":"code","source":"# Optional cleanup commands; uncomment what you need\n#!rm -rf /kaggle/working/stable-diffusion-webui/extensions/\n#!rm -rf /kaggle/working/webui_env.tar.gz\n#!rm -rf /kaggle/working/venv.tar.gz\n#!rm -rf /kaggle/working/py31010_torch2_cu118.tar.gz\n#!rm -rf /kaggle/working/wwebui_env_torch2_cu118\n#!rm -rf /kaggle/working/","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:03.004103Z","iopub.execute_input":"2023-04-21T12:49:03.004438Z","iopub.status.idle":"2023-04-21T12:49:03.028191Z","shell.execute_reply.started":"2023-04-21T12:49:03.004406Z","shell.execute_reply":"2023-04-21T12:49:03.027029Z"},"trusted":true},"execution_count":108,"outputs":[]},{"cell_type":"code","source":"#!apt -y update -qq\n#!apt -y install -qq aria2\n#!echo 'Downloading automatically'\n#!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/datasets/sukaka/venv_ai_drow/resolve/main/venv.tar.gz -d /kaggle/working -o venv.tar.gz\n#!echo 'Download finished'","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:03.029693Z","iopub.execute_input":"2023-04-21T12:49:03.030203Z","iopub.status.idle":"2023-04-21T12:49:03.039228Z","shell.execute_reply.started":"2023-04-21T12:49:03.030163Z","shell.execute_reply":"2023-04-21T12:49:03.038205Z"},"trusted":true},"execution_count":109,"outputs":[]},{"cell_type":"code","source":"# Environment setup: only needed the first time; comment these lines out once the environment has been built\n#!conda create -n webui_env -c cctbx202208 -y\n#!source /opt/conda/bin/activate webui_env && conda install -q -c cctbx202208 python -y\n#!/opt/conda/envs/venv/bin/python3 -m pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 -f https://download.pytorch.org/whl/torch_stable.html\n#!/opt/conda/envs/venv/bin/python3 -m pip install -r /kaggle/input/huanjin/requirements_versions.txt\n#!conda install -c conda-forge conda-pack -y\n#!conda pack -n webui_env -o /kaggle/working/webui_env.tar.gz","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:03.042768Z","iopub.execute_input":"2023-04-21T12:49:03.043216Z","iopub.status.idle":"2023-04-21T12:49:03.050519Z","shell.execute_reply.started":"2023-04-21T12:49:03.043170Z","shell.execute_reply":"2023-04-21T12:49:03.049189Z"},"trusted":true},"execution_count":110,"outputs":[]},{"cell_type":"code","source":"# The code below initializes values and environment variables; avoid changing it unless you know what you are doing\nfrom pathlib import Path\nimport shutil\nimport os\nimport time\nimport re\nimport gc\nimport subprocess\nimport multiprocessing\nos.environ['install_path'] = install_path\n\nngrokToken=''\nif Path(ngrokTokenFile).exists():\n    with open(ngrokTokenFile,encoding = \"utf-8\") as nkfile:\n        ngrokToken = nkfile.readline()\nngrokToken1=''\nif Path(ngrokTokenFile1).exists():\n    with open(ngrokTokenFile1,encoding = \"utf-8\") as nkfile:\n        ngrokToken1 = nkfile.readline()","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:03.052010Z","iopub.execute_input":"2023-04-21T12:49:03.053451Z","iopub.status.idle":"2023-04-21T12:49:03.090298Z","shell.execute_reply.started":"2023-04-21T12:49:03.053324Z","shell.execute_reply":"2023-04-21T12:49:03.089259Z"},"trusted":true},"execution_count":111,"outputs":[]},{"cell_type":"code","source":"# Run a command through a bash script, working around os.system's limitations\ndef run(shell:str):\n    with open('run_cache.sh','w') as sh:\n
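        # the command string is written to a throwaway script so bash can execute it, avoiding os.system's limits on long or complex command lines\n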
        sh.write(shell)\n    !bash run_cache.sh\n\n# Join several path fragments and quote shell metacharacters so the result can be used safely in a shell command\ndef pathJoin(*paths:str):\n    pathStr = ''\n    for p in paths:\n        pathStr += '\"'+p+'\"'\n    pathStr = '\"*\"'.join(pathStr.split('*'))\n    pathStr = '\"$\"'.join(pathStr.split('$'))\n    pathStr = '\"(\"'.join(pathStr.split('('))\n    pathStr = '\")\"'.join(pathStr.split(')'))\n    pathStr = '\"{\"'.join(pathStr.split('{'))\n    pathStr = '\"}\"'.join(pathStr.split('}'))\n    pathStr = re.sub(r'\"\"','',pathStr)\n    pathStr = re.sub(r'\\*{2,}','\"',pathStr)\n    pathStr = re.sub(r'/{2,}','/',pathStr)\n    pathStr = re.sub(r'/\\./','/',pathStr)\n    return pathStr\n\n# Check whether a path is an existing file or looks like it points to one or more files\ndef pathIsFile(path):\n    if Path(path).is_file():\n        return True\n    if re.search(r'\\.(ckpt|safetensors|png|jpg|txt|pt|pth|json|yaml|\\*)$',path):\n        return True\n    return False\n\n# Zip the generated images and/or clear the output folders\ndef zip_clear():\n    if zip_output:\n        shutil.make_archive('/kaggle/working/图片', 'zip', '/kaggle/working/stable-diffusion-webui/outputs/')\n        print('Images zipped into the output directory')\n    if clear_output:\n        def clear_directory(folder_path):\n            for filename in os.listdir(folder_path):\n                file_path = os.path.join(folder_path, filename)\n                if os.path.isfile(file_path) or os.path.islink(file_path):\n                    os.unlink(file_path)\n                elif os.path.isdir(file_path):\n                    shutil.rmtree(file_path)\n        clear_directory('/kaggle/working/stable-diffusion-webui/outputs/img2img-images')\n        clear_directory('/kaggle/working/stable-diffusion-webui/outputs/txt2img-images')\n        clear_directory('/kaggle/working/stable-diffusion-webui/outputs/img2img-grids')\n        clear_directory('/kaggle/working/stable-diffusion-webui/outputs/txt2img-grids')\n        clear_directory('/kaggle/working/stable-diffusion-webui/outputs/extras-images')\n        print('Cleanup finished')\n\n# Install libtcmalloc (google-perftools) and preload it for the webui processes\ndef libtcmalloc():\n    !apt-get remove google-perftools -y\n    %cd /kaggle\n    !mkdir temp\n    %cd temp\n    !wget -qq --show-progress http://launchpadlibrarian.net/367274644/libgoogle-perftools-dev_2.5-2.2ubuntu3_amd64.deb\n    !wget -qq --show-progress https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/google-perftools_2.5-2.2ubuntu3_all.deb\n    !wget -qq --show-progress https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/libtcmalloc-minimal4_2.5-2.2ubuntu3_amd64.deb\n    !wget -qq --show-progress https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/libgoogle-perftools4_2.5-2.2ubuntu3_amd64.deb\n    !apt install -qq libunwind8-dev -y\n    !dpkg -i *.deb\n    !rm *.deb\n    os.environ[\"LD_PRELOAD\"] = \"libtcmalloc.so\"\n\n# Download and unpack the prebuilt conda environment\ndef venvinstall():\n    %cd /opt/conda/envs\n    if os.path.exists('venv'):\n        print('Environment already installed')\n    else:\n        %cd /kaggle/working/\n        if not os.path.exists('venv.tar.gz'):\n            print('Downloading the environment archive')\n            !wget https://huggingface.co/datasets/sukaka/venv_ai_drow/resolve/main/py31010_torch2_cu118.tar.gz -O venv.tar.gz\n            print('Environment archive downloaded')\n        %cd /opt/conda/envs/\n        !mkdir venv\n        %cd venv\n        print('Installing the environment')\n        !tar -xzf /kaggle/working/venv.tar.gz\n        !source /opt/conda/bin/activate venv\n        #if use2:\n        #    source_folder = '/opt/conda/envs/venv'\n        #    destination_folder = '/home/user/venv1'\n        #    # create the destination folder\n        #    if not os.path.exists(destination_folder):\n        #        os.makedirs(destination_folder)\n        #    # copy everything from the source folder into the destination\n        #    shutil.copytree(source_folder, destination_folder)\n        #!source /opt/conda/bin/activate venv1\n        print('Environment installed')\n\n# Install the webui\ndef install():\n    %cd $install_path\n    if reLoad:\n        !rm -rf stable-diffusion-webui\n    if Path(\"stable-diffusion-webui\").exists():\n        print('stable-diffusion-webui is already installed')\n    else:\n        print('Installing stable-diffusion-webui')\n
        # Download AUTOMATIC1111's Stable Diffusion Web UI\n        !git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui\n        %cd $install_path/stable-diffusion-webui/\n        # Pin to a known working commit\n        !git checkout 22bcc7be428c94e9408f589966c2040187245d81\n        # Prepend an SSL workaround so HTTPS downloads do not fail on certificate errors\n        with open('launch.py', 'r') as f:\n            content = f.read()\n        with open('launch.py', 'w') as f:\n            f.write('import ssl\\n')\n            f.write('ssl._create_default_https_context = ssl._create_unverified_context\\n')\n            f.write(content)\n        print('stable-diffusion-webui installed')\n\n# Simple widget-based model downloader\ndef model_down_tool():\n    !apt -y update -qq\n    !apt -y install -qq aria2\n    import ipywidgets as widgets\n    from IPython.display import display\n    #import os\n    #install_path=\"/kaggle/working\"\n    #os.environ['install_path'] = install_path\n    import subprocess\n    def download_with_aria2(link, file_path):\n        # aria2c command-line arguments\n        cmd = ['aria2c','--console-log-level=error', link, '-o', file_path, '-x', '16', '-s', '16', '-k', '1M']\n\n        # download the file with aria2c\n        try:\n            subprocess.run(cmd, check=True)\n            print(f\"File saved to: {file_path}\")\n        except subprocess.CalledProcessError as e:\n            print(f\"Download failed: {e}\")\n    def sdmodel_down(link, model_name):\n        # directory where the checkpoint will be saved\n        %cd $install_path\n        save_dir = 'stable-diffusion-webui/models/Stable-diffusion'\n        if not os.path.exists(save_dir):\n            os.makedirs(save_dir)\n\n        # file name for the saved model\n        file_name = f\"{model_name}\"\n        file_path = os.path.join(save_dir, file_name)\n\n        # download the model\n        download_with_aria2(link, file_path)\n\n        print(f\"Model saved to: {file_path}\")\n\n    def vae_down(link, model_name):\n        # directory where the VAE will be saved\n        %cd $install_path\n        save_dir = 'stable-diffusion-webui/models/VAE'\n        if not os.path.exists(save_dir):\n            os.makedirs(save_dir)\n\n        # file name for the saved model\n        file_name = f\"{model_name}\"\n        file_path = os.path.join(save_dir, file_name)\n\n        # download the model\n        download_with_aria2(link, file_path)\n\n        print(f\"Model saved to: {file_path}\")\n\n    def lora_down(link, model_name):\n        # directory where the Lora will be saved\n        %cd $install_path\n        save_dir = 'stable-diffusion-webui/models/Lora'\n        if not os.path.exists(save_dir):\n            os.makedirs(save_dir)\n\n        # file name for the saved model\n        file_name = f\"{model_name}\"\n        file_path = os.path.join(save_dir, file_name)\n\n        # download the model\n        download_with_aria2(link, file_path)\n\n        print(f\"Model saved to: {file_path}\")\n\n    model_type = widgets.Dropdown(\n        options=['SD checkpoint', 'VAE model', 'Lora model'],\n        description='Model type:',\n        disabled=False,\n    )\n\n    link = widgets.Text(\n        value='',\n        placeholder='Enter the download URL',\n        description='URL:',\n        disabled=False\n    )\n\n    model_name = widgets.Text(\n        value='',\n        placeholder='Enter the model file name',\n        description='Model name:',\n        disabled=False\n    )\n\n    def on_submit(btn):\n        if model_type.value == 'SD checkpoint':\n            sdmodel_down(link.value, model_name.value)\n        elif model_type.value == 'VAE model':\n            vae_down(link.value, model_name.value)\n        else:\n            lora_down(link.value, model_name.value)\n\n    submit = widgets.Button(description=\"Submit\")\n    submit.on_click(on_submit)\n\n    display(model_type, link, model_name, submit)\n# Link (or copy) model files into the webui directories\ndef link_models():\n    print(('Copying' if enableLoadByCopy else 'Linking') + ' model files')\n    if enableLoadByCopy:\n        print('Errors like \"No such file or directory\" here can be ignored')\n    !mkdir -p $install_path/stable-diffusion-webui/models/Stable-diffusion # checkpoint directory\n    !mkdir -p $install_path/stable-diffusion-webui/models/hypernetworks # hypernetwork directory\n    !mkdir -p $install_path/stable-diffusion-webui/embeddings # embeddings directory\n    !mkdir -p $install_path/stable-diffusion-webui/models/Lora # Lora directory\n    !mkdir -p $install_path/stable-diffusion-webui/extensions/sd-webui-controlnet/models/ # ControlNet model directory\n
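    # scan every configured directory and link (or copy) its checkpoints, hypernetworks, embeddings and Loras into the matching webui folder\n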
    for path in modelDirs:\n        if pathIsFile(path):\n            os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path) +' $install_path/stable-diffusion-webui/models/Stable-diffusion')\n            continue\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.ckpt') +' $install_path/stable-diffusion-webui/models/Stable-diffusion/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.safetensors') +' $install_path/stable-diffusion-webui/models/Stable-diffusion/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.png') +' $install_path/stable-diffusion-webui/models/Stable-diffusion/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.jpg') +' $install_path/stable-diffusion-webui/models/Stable-diffusion/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.pt') +' $install_path/stable-diffusion-webui/models/Stable-diffusion/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/hypernetworks*/*.*') +' $install_path/stable-diffusion-webui/models/hypernetworks/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/embeddings*/*.*') +' $install_path/stable-diffusion-webui/embeddings/')\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/lora*/*.*') +' $install_path/stable-diffusion-webui/models/Lora/')\n    for path in loraModelDirs:\n        if pathIsFile(path):\n            os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path) +' $install_path/stable-diffusion-webui/models/Lora/')\n            continue\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.*') +' $install_path/stable-diffusion-webui/models/Lora/')\n    for path in embeddingsModelDirs:\n        if pathIsFile(path):\n            os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path) +' $install_path/stable-diffusion-webui/embeddings/')\n            continue\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.*') +' $install_path/stable-diffusion-webui/embeddings/')\n    for path in hypernetworksModelDirs:\n        if pathIsFile(path):\n            os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path) +' $install_path/stable-diffusion-webui/models/hypernetworks/')\n            continue\n        os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path,'/*.*') +' $install_path/stable-diffusion-webui/models/hypernetworks/')\n    if Path(install_path+'/stable-diffusion-webui/extensions/sd-webui-controlnet').exists():\n        for path in Controlnet:\n            if pathIsFile(path):\n                os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f '+ pathJoin(path) +' $install_path/stable-diffusion-webui/extensions/sd-webui-controlnet/models/')\n                continue\n            os.system(('cp -n' if enableLoadByCopy else 'ln -s')+' -f ' + pathJoin(path,'/*.*') +' $install_path/stable-diffusion-webui/extensions/sd-webui-controlnet/models/')\n        %cd $install_path/stable-diffusion-webui/extensions/sd-webui-controlnet/models/\n        !rm -f \\*.* # remove the stray '*.*' file left behind when a folder had no matching files\n        !echo ControlNet model file list\n        !ls\n\n    %cd $install_path/stable-diffusion-webui/models/Stable-diffusion\n    !rm -f \\*.* & rm -f \\*.ckpt & rm -f \\*.safetensors & rm -f \\*.png & rm -f \\*.pt & rm -f \\*.jpg # remove the stray wildcard files left behind when a folder had no matching files\n    !echo Model file list\n    !ls\n    %cd $install_path/stable-diffusion-webui/models/hypernetworks\n    !rm -f \\*.* # remove the stray '*.*' file left behind when a folder had no matching files\n    !echo Hypernetwork file list\n
    !ls\n    %cd $install_path/stable-diffusion-webui/embeddings\n    !rm -f \\*.* # remove the stray '*.*' file left behind when a folder had no matching files\n    !echo Embedding file list\n    !ls\n    %cd $install_path/stable-diffusion-webui/models/Lora\n    !rm -f \\*.* # remove the stray '*.*' file left behind when a folder had no matching files\n    !echo Lora file list\n    !ls\n\n# Install the extensions listed above\ndef install_extensions():\n    if reLoad:\n        print('Installing extensions; red pip output here is normal')\n        %cd $install_path/stable-diffusion-webui\n        !mkdir -p extensions\n        %cd extensions\n        for ex in extensions:\n            os.system('git clone '+ex)\n        print('Extensions installed')\n\n# ngrok tunnel for the second webui instance (port 7861)\ndef ngrok_start1():\n    if useNgrok == True and ngrokToken1 != '':\n        print('using ngrok')\n        from pyngrok import conf, ngrok\n        conf.get_default().auth_token = ngrokToken1\n        conf.get_default().monitor_thread = False\n        ssh_tunnels = ngrok.get_tunnels(conf.get_default())\n        if len(ssh_tunnels) == 0:\n            ssh_tunnel = ngrok.connect(7861, bind_tls=True)\n            print('address:'+ssh_tunnel.public_url)\n        else:\n            print('address:'+ssh_tunnels[0].public_url)\n    else:\n        print('skip start ngrok')\n\n# ngrok tunnel for the first webui instance (port 7860)\ndef ngrok_start():\n    if useNgrok == True and ngrokToken != '':\n        print('using ngrok')\n        from pyngrok import conf, ngrok\n        conf.get_default().auth_token = ngrokToken\n        conf.get_default().monitor_thread = False\n        ssh_tunnels = ngrok.get_tunnels(conf.get_default())\n        if len(ssh_tunnels) == 0:\n            ssh_tunnel = ngrok.connect(7860)\n            print('address:'+ssh_tunnel.public_url)\n        else:\n            print('address:'+ssh_tunnels[0].public_url)\n    else:\n        print('skip start ngrok')\ndef ready_webui():\n    install()\n    install_extensions()\n    link_models()\n    !pip install pyngrok\n\n# Launch the second webui instance on port 7861\ndef start1():\n    if use2:\n        ngrok_start1()\n        %cd $install_path/stable-diffusion-webui\n        args = '--disable-safe-unpickle'\n        if not disableShared1:\n            args += ' --share'\n        args += ' --skip-torch-cuda-test'\n        args += ' --xformers' # xformers is available in this environment\n        args += ' --lowram' # lower the chance of running out of RAM\n        args += ' --no-hashing'\n        args += ' --disable-nan-check'\n        args += ' --opt-channelslast'\n        args += ' --enable-insecure-extension-access'\n        if usedCkpt1 is not None and usedCkpt1 != '': # model loaded by default at startup\n            if '.' in usedCkpt1:\n                if '/' in usedCkpt1:\n                    args += ' --ckpt='+ pathJoin(usedCkpt1)\n                else:\n                    args += ' --ckpt='+ pathJoin(install_path, '/stable-diffusion-webui/models/Stable-diffusion/', usedCkpt1)\n            else:\n                for x in ['.ckpt','.safetensors']:\n                    if Path(install_path+'/stable-diffusion-webui/models/Stable-diffusion/' + usedCkpt1+x).exists():\n                        args += ' --ckpt='+ pathJoin(install_path, '/stable-diffusion-webui/models/Stable-diffusion/', usedCkpt1, x)\n                        break\n        if vaeHalf is False:\n            args += ' --no-half-vae'\n        if modelHalf is False:\n            args += ' --no-half'\n        if consoleProgressbars is False:\n            args += ' --disable-console-progressbars'\n        if consolePrompts is True:\n            args += ' --enable-console-prompts'\n        os.environ['COMMANDLINE_ARGS']=args\n        !echo COMMANDLINE_ARGS=$COMMANDLINE_ARGS\n        !pm2 log & /opt/conda/envs/venv/bin/python3 launch.py --port=7861 --device-id=1 --no-gradio-queue\n    pass\n\n# Launch the first webui instance on port 7860\ndef start():\n    %cd $install_path/stable-diffusion-webui\n    args = '--disable-safe-unpickle'\n    if not disableShared:\n        args += ' --share'\n    args += ' --skip-torch-cuda-test'\n    args += ' --xformers' # xformers is available in this environment\n    args += ' --lowram' # lower the chance of running out of RAM\n    args += ' --no-hashing'\n    args += ' --disable-nan-check'\n    args += ' --opt-channelslast'\n    args += ' --enable-insecure-extension-access'\n    if usedCkpt is not None and usedCkpt != '': # model loaded by default at startup\n
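        # a path is used as given; a bare file name is resolved under models/Stable-diffusion, trying .ckpt and .safetensors when no extension is supplied\n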
        if '.' in usedCkpt:\n            if '/' in usedCkpt:\n                args += ' --ckpt='+ pathJoin(usedCkpt)\n            else:\n                args += ' --ckpt='+ pathJoin(install_path, '/stable-diffusion-webui/models/Stable-diffusion/', usedCkpt)\n        else:\n            for x in ['.ckpt','.safetensors']:\n                if Path(install_path+'/stable-diffusion-webui/models/Stable-diffusion/' + usedCkpt+x).exists():\n                    args += ' --ckpt='+ pathJoin(install_path, '/stable-diffusion-webui/models/Stable-diffusion/', usedCkpt, x)\n                    break\n    if vaeHalf is False:\n        args += ' --no-half-vae'\n    if modelHalf is False:\n        args += ' --no-half'\n    if consoleProgressbars is False:\n        args += ' --disable-console-progressbars'\n    if consolePrompts is True:\n        args += ' --enable-console-prompts'\n    os.environ['COMMANDLINE_ARGS']=args\n    !echo COMMANDLINE_ARGS=$COMMANDLINE_ARGS\n    # pm2 is optional; if it is not installed the shell just prints 'command not found' and the webui still starts\n    !pm2 log & /opt/conda/envs/venv/bin/python3 launch.py --no-gradio-queue\n\ndef main():\n    startTicks = time.time()\n    isInstall = True if os.getenv('IsInstall','False') == 'True' else False\n    if isInstall is False or reLoad:\n        libtcmalloc()\n        if __name__ == '__main__':\n            t1 = multiprocessing.Process(target=venvinstall)\n            t2 = multiprocessing.Process(target=ready_webui)\n            t1.start()\n            t2.start()\n            t1.join()\n            t2.join()\n        isInstall = True\n        os.environ['IsInstall'] = 'True'\n    ngrok_start()\n    ticks = time.time()\n    print(\"Load time:\",(ticks - startTicks),\"s\")\n    gc.collect()\n    if __name__ == '__main__':\n        p1 = multiprocessing.Process(target=start)\n        p2 = multiprocessing.Process(target=start1)\n        p1.start()\n        p2.start()\n        p1.join()\n        p2.join()\n","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:03.142873Z","iopub.execute_input":"2023-04-21T12:49:03.143286Z","iopub.status.idle":"2023-04-21T12:49:06.266816Z","shell.execute_reply.started":"2023-04-21T12:49:03.143248Z","shell.execute_reply":"2023-04-21T12:49:06.265641Z"},"trusted":true},"execution_count":112,"outputs":[]},{"cell_type":"code","source":"# Model downloader\n#model_down_tool()\n#safetensors","metadata":{"execution":{"iopub.status.busy":"2023-04-21T12:49:06.269289Z","iopub.execute_input":"2023-04-21T12:49:06.269812Z","iopub.status.idle":"2023-04-21T12:49:06.325740Z","shell.execute_reply.started":"2023-04-21T12:49:06.269772Z","shell.execute_reply":"2023-04-21T12:49:06.324824Z"},"trusted":true},"execution_count":113,"outputs":[]},{"cell_type":"code","source":"# start\nmain()","metadata":{"_kg_hide-input":true,"_kg_hide-output":false,"execution":{"iopub.status.busy":"2023-04-21T12:49:06.327495Z","iopub.execute_input":"2023-04-21T12:49:06.328182Z","iopub.status.idle":"2023-04-21T13:14:56.469299Z","shell.execute_reply.started":"2023-04-21T12:49:06.328142Z","shell.execute_reply":"2023-04-21T13:14:56.467190Z"},"trusted":true},"execution_count":114,"outputs":[{"name":"stdout","text":"use nrgok\naddress:https://9398-35-193-9-137.ngrok-free.app\n加载耗时: 0.32435154914855957 s\n/kaggle/working/stable-diffusion-webuiuse nrgok\n\naddress:https://17f4-35-193-9-137.ngrok-free.app\n/kaggle/working/stable-diffusion-webui\nCOMMANDLINE_ARGS=--disable-safe-unpickle --skip-torch-cuda-test --xformers --lowram --no-hashing --disable-nan-check --opt-channelslast --enable-insecure-extension-access --ckpt=\"/kaggle/working/stable-diffusion-webui/models/Stable-diffusion/cetusMix.safetensors\" --disable-console-progressbars\nCOMMANDLINE_ARGS=--disable-safe-unpickle --skip-torch-cuda-test --xformers --lowram --no-hashing --disable-nan-check --opt-channelslast --enable-insecure-extension-access --ckpt=\"/kaggle/working/stable-diffusion-webui/models/Stable-diffusion/9527.safetensors\" 
--disable-console-progressbars\n/bin/bash: pm2: command not found\nPython 3.10.10 | packaged by conda-forge | (main, Mar 24 2023, 20:08:06) [GCC 11.3.0]\nCommit hash: 22bcc7be428c94e9408f589966c2040187245d81\nInstalling requirements for Web UI\n/bin/bash: pm2: command not found\nPython 3.10.10 | packaged by conda-forge | (main, Mar 24 2023, 20:08:06) [GCC 11.3.0]\nCommit hash: 22bcc7be428c94e9408f589966c2040187245d81\nInstalling requirements for Web UI\n\n\n\nLaunching Web UI with arguments: --no-gradio-queue --disable-safe-unpickle --skip-torch-cuda-test --xformers --lowram --no-hashing --disable-nan-check --opt-channelslast --enable-insecure-extension-access --ckpt=/kaggle/working/stable-diffusion-webui/models/Stable-diffusion/cetusMix.safetensors --disable-console-progressbars\n\n\n\nLaunching Web UI with arguments: --port=7861 --device-id=1 --no-gradio-queue --disable-safe-unpickle --skip-torch-cuda-test --xformers --lowram --no-hashing --disable-nan-check --opt-channelslast --enable-insecure-extension-access --ckpt=/kaggle/working/stable-diffusion-webui/models/Stable-diffusion/9527.safetensors --disable-console-progressbars\n/opt/conda/envs/venv/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:5: UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in 0.17**. Please don't rely on it. You probably just need to use APIs in torchvision.transforms.functional or in torchvision.transforms.v2.functional.\n warnings.warn(\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nLoading weights [None] from /kaggle/working/stable-diffusion-webui/models/Stable-diffusion/cetusMix.safetensors\n/opt/conda/envs/venv1/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:5: UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in 0.17**. Please don't rely on it. 
You probably just need to use APIs in torchvision.transforms.functional or in torchvision.transforms.v2.functional.\n warnings.warn(\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nLoading weights [None] from /kaggle/working/stable-diffusion-webui/models/Stable-diffusion/9527.safetensors\nCreating model from config: /kaggle/working/stable-diffusion-webui/configs/v1-inference.yaml\nLatentDiffusion: Running in eps-prediction mode\nDiffusionWrapper has 859.52 M params.\nApplying xformers cross attention optimization.\nTextual inversion embeddings loaded(0): \nModel loaded in 43.1s (load weights from disk: 32.6s, create model: 1.6s, apply weights to model: 5.6s, apply channels_last: 1.4s, apply half(): 1.1s, move model to device: 0.8s).\nRunning on local URL: http://127.0.0.1:7861\n\nTo create a public link, set `share=True` in `launch()`.\nStartup time: 58.5s (import torch: 3.9s, import gradio: 2.3s, import ldm: 2.5s, other imports: 2.5s, setup codeformer: 0.2s, load scripts: 1.7s, load SD checkpoint: 43.1s, create ui: 1.8s, gradio launch: 0.4s).\nCreating model from config: /kaggle/working/stable-diffusion-webui/configs/v1-inference.yaml\nLatentDiffusion: Running in eps-prediction mode\nDiffusionWrapper has 859.52 M params.\nApplying xformers cross attention optimization.\nTextual inversion embeddings loaded(0): \nModel loaded in 61.5s (load weights from disk: 52.6s, create model: 1.3s, apply weights to model: 4.7s, apply channels_last: 1.1s, apply half(): 1.1s, move model to device: 0.7s).\nRunning on local URL: http://127.0.0.1:7860\n\nTo create a public link, set `share=True` in `launch()`.\nStartup time: 73.6s (import torch: 2.0s, import gradio: 1.6s, import ldm: 2.7s, other imports: 1.6s, setup codeformer: 0.1s, load scripts: 2.4s, load SD checkpoint: 61.5s, create ui: 1.3s, gradio launch: 0.3s).\nClosing server running on port: 7861\nRestarting UI...\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nTraceback (most recent call last):\n File \"/opt/conda/envs/venv1/lib/python3.10/site-packages/gradio/networking.py\", line 119, in start_server\n s.bind((LOCALHOST_NAME, server_port))\nOSError: [Errno 98] Address already in use\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/kaggle/working/stable-diffusion-webui/launch.py\", line 358, in \n start()\n File \"/kaggle/working/stable-diffusion-webui/launch.py\", line 353, in start\n webui.webui()\n File \"/kaggle/working/stable-diffusion-webui/webui.py\", line 257, in webui\n app, local_url, share_url = shared.demo.launch(\n File \"/opt/conda/envs/venv1/lib/python3.10/site-packages/gradio/blocks.py\", line 1454, in launch\n server_name, server_port, local_url, app, server = networking.start_server(\n File \"/opt/conda/envs/venv1/lib/python3.10/site-packages/gradio/networking.py\", line 122, in start_server\n raise OSError(\nOSError: Port 7861 is in use. 
If a gradio.Blocks is running on the port, you can close() it or gradio.close_all().\nClosing server running on port: 7860\nRestarting UI...\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nRunning on local URL: http://127.0.0.1:7860\n\nTo create a public link, set `share=True` in `launch()`.\nStartup time: 2.1s (load scripts: 0.6s, create ui: 1.4s).\nLoading weights [None] from /kaggle/working/stable-diffusion-webui/models/Stable-diffusion/9527.safetensors\nApplying xformers cross attention optimization.\nWeights loaded in 25.4s (load weights from disk: 23.4s, apply weights to model: 1.3s, move model to device: 0.6s).\nError completing request\nArguments: ('', 'https://github.com/Elldreth/loopback_scaler') {}\nTraceback (most recent call last):\n File \"/kaggle/working/stable-diffusion-webui/modules/call_queue.py\", line 56, in f\n res = list(func(*args, **kwargs))\n File \"/kaggle/working/stable-diffusion-webui/modules/ui_extensions.py\", line 144, in install_extension_from_url\n assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}'\nAssertionError: Extension directory already exists: /kaggle/working/stable-diffusion-webui/extensions/loopback_scaler\n\nClosing server running on port: 7860\nRestarting UI...\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nRunning on local URL: http://127.0.0.1:7860\n\nTo create a public link, set `share=True` in `launch()`.\nStartup time: 2.3s (load scripts: 0.7s, create ui: 1.1s, gradio launch: 0.5s).\nIf submitting an issue on github, please provide the full startup log for debugging purposes.\n\nInitializing Dreambooth\nDreambooth revision: 926ae204ef5de17efca2059c334b6098492a0641\nSuccessfully installed accelerate-0.18.0 astunparse-1.6.3 bitsandbytes-0.35.4 dadaptation-1.5 diffusers-0.14.0 discord-webhook-1.1.0 fastapi-0.94.1 gast-0.4.0 gitpython-3.1.31 google-auth-oauthlib-0.4.6 google-pasta-0.2.0 h5py-3.8.0 importlib-metadata-6.5.0 jax-0.4.8 keras-2.12.0 libclang-16.0.0 lion-pytorch-0.0.7 ml-dtypes-0.1.0 opt-einsum-3.3.0 protobuf-4.22.3 requests-2.28.2 tensorboard-2.12.0 tensorflow-2.12.0 tensorflow-estimator-2.12.0 tensorflow-io-gcs-filesystem-0.32.0 tqdm-4.64.1 transformers-4.26.1 wrapt-1.14.1 zipp-3.15.0\n[+] xformers version 0.0.17rc482 installed.\n[+] torch version 2.0.0+cu118 installed.\n[+] torchvision version 0.15.1+cu118 installed.\n[+] accelerate version 0.18.0 installed.\n[+] diffusers version 0.14.0 installed.\n[+] transformers version 4.26.1 installed.\n[+] bitsandbytes version 0.35.4 installed.\n\nClosing server running on port: 7860\nRestarting UI...\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nException importing 
api\nTraceback (most recent call last):\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1093, in _get_module\n File \"/opt/conda/envs/venv/lib/python3.10/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n File \"\", line 1050, in _gcd_import\n File \"\", line 1027, in _find_and_load\n File \"\", line 1006, in _find_and_load_unlocked\n File \"\", line 688, in _load_unlocked\n File \"\", line 883, in exec_module\n File \"\", line 241, in _call_with_frames_removed\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py\", line 20, in \n from .image_processing_clip import CLIPImageProcessor\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py\", line 23, in \n from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/image_processing_utils.py\", line 24, in \n from .feature_extraction_utils import BatchFeature as BaseBatchFeature\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/feature_extraction_utils.py\", line 28, in \n from .utils import (\nImportError: cannot import name 'is_torch_dtype' from 'transformers.utils' (/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/__init__.py)\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/scripts/api.py\", line 27, in \n from dreambooth.dataclasses.db_config import from_file, DreamboothConfig\n File \"/kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/dreambooth/dataclasses/db_config.py\", line 10, in \n from dreambooth.utils.image_utils import get_scheduler_names # noqa\n File \"/kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/dreambooth/utils/image_utils.py\", line 11, in \n from diffusers.schedulers import KarrasDiffusionSchedulers\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/__init__.py\", line 55, in \n from .pipelines import (\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/pipelines/__init__.py\", line 44, in \n from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/pipelines/alt_diffusion/__init__.py\", line 32, in \n from .pipeline_alt_diffusion import AltDiffusionPipeline\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py\", line 20, in \n from transformers import CLIPFeatureExtractor, XLMRobertaTokenizer\n File \"\", line 1075, in _handle_fromlist\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1084, in __getattr__\n # Needed for autocompletion in an IDE\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1083, in __getattr__\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1095, in _get_module\n if name in self._objects:\nRuntimeError: Failed to import transformers.models.clip.feature_extraction_clip because of the following error (look up to see its traceback):\ncannot import name 'is_torch_dtype' from 'transformers.utils' 
(/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/__init__.py)\nError loading script: main.py\nTraceback (most recent call last):\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1093, in _get_module\n File \"/opt/conda/envs/venv/lib/python3.10/importlib/__init__.py\", line 126, in import_module\n return _bootstrap._gcd_import(name[level:], package, level)\n File \"\", line 1050, in _gcd_import\n File \"\", line 1027, in _find_and_load\n File \"\", line 1006, in _find_and_load_unlocked\n File \"\", line 688, in _load_unlocked\n File \"\", line 883, in exec_module\n File \"\", line 241, in _call_with_frames_removed\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py\", line 20, in \n from .image_processing_clip import CLIPImageProcessor\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py\", line 23, in \n from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/image_processing_utils.py\", line 24, in \n from .feature_extraction_utils import BatchFeature as BaseBatchFeature\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/feature_extraction_utils.py\", line 28, in \n from .utils import (\nImportError: cannot import name 'is_torch_dtype' from 'transformers.utils' (/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/__init__.py)\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/kaggle/working/stable-diffusion-webui/modules/scripts.py\", line 256, in load_scripts\n script_module = script_loading.load_module(scriptfile.path)\n File \"/kaggle/working/stable-diffusion-webui/modules/script_loading.py\", line 11, in load_module\n module_spec.loader.exec_module(module)\n File \"\", line 883, in exec_module\n File \"\", line 241, in _call_with_frames_removed\n File \"/kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/scripts/main.py\", line 11, in \n from dreambooth.diff_to_sd import compile_checkpoint\n File \"/kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/dreambooth/diff_to_sd.py\", line 15, in \n from diffusers import UNet2DConditionModel\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/__init__.py\", line 55, in \n from .pipelines import (\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/pipelines/__init__.py\", line 44, in \n from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/pipelines/alt_diffusion/__init__.py\", line 32, in \n from .pipeline_alt_diffusion import AltDiffusionPipeline\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py\", line 20, in \n from transformers import CLIPFeatureExtractor, XLMRobertaTokenizer\n File \"\", line 1075, in _handle_fromlist\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1084, in __getattr__\n # Needed for autocompletion in an IDE\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1083, in __getattr__\n File \"/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/import_utils.py\", line 1095, in 
_get_module\n if name in self._objects:\nRuntimeError: Failed to import transformers.models.clip.feature_extraction_clip because of the following error (look up to see its traceback):\ncannot import name 'is_torch_dtype' from 'transformers.utils' (/opt/conda/envs/venv/lib/python3.10/site-packages/transformers/utils/__init__.py)\n\nRunning on local URL: http://127.0.0.1:7860\n\nTo create a public link, set `share=True` in `launch()`.\nError executing callback app_started_callback for /kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/scripts/api.py\nTraceback (most recent call last):\n File \"/kaggle/working/stable-diffusion-webui/modules/script_callbacks.py\", line 107, in app_started_callback\n c.callback(demo, app)\n File \"/kaggle/working/stable-diffusion-webui/extensions/sd_dreambooth_extension/scripts/api.py\", line 524, in dreambooth_api\n model_cfg: DreamboothConfig = Body(description=\"The config to save\"),\nNameError: name 'DreamboothConfig' is not defined\n\nStartup time: 2.6s (load scripts: 0.8s, create ui: 1.6s).\nClosing server running on port: 7860\nRestarting UI...\nCivitai Helper: Get Custom Model Folder\nCivitai Helper: Load setting from: /kaggle/working/stable-diffusion-webui/extensions/Stable-Diffusion-Webui-Civitai-Helper/setting.json\nCivitai Helper: No setting file, use default\nAdditional Network extension not installed, Only hijack built-in lora\nLoCon Extension hijack built-in lora successfully\nRunning on local URL: http://127.0.0.1:7860\n\nTo create a public link, set `share=True` in `launch()`.\nStartup time: 2.4s (load scripts: 0.6s, create ui: 1.6s).\n","output_type":"stream"},{"traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_24/4281207697.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# start\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m","\u001b[0;32m/tmp/ipykernel_24/4287389366.py\u001b[0m in \u001b[0;36mmain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 419\u001b[0m \u001b[0mp1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 420\u001b[0m \u001b[0mp2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 421\u001b[0;31m \u001b[0mp1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 422\u001b[0m \u001b[0mp2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.7/multiprocessing/process.py\u001b[0m in \u001b[0;36mjoin\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 138\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent_pid\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetpid\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'can only join a child 
process'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_popen\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'can only join a started process'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 140\u001b[0;31m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_popen\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 141\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mres\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 142\u001b[0m \u001b[0m_children\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdiscard\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.7/multiprocessing/popen_fork.py\u001b[0m in \u001b[0;36mwait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0;31m# This shouldn't block if wait() returned successfully.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 48\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpoll\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mWNOHANG\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtimeout\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0.0\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 49\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreturncode\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.7/multiprocessing/popen_fork.py\u001b[0m in \u001b[0;36mpoll\u001b[0;34m(self, flag)\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreturncode\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 28\u001b[0;31m \u001b[0mpid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwaitpid\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mflag\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 29\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mOSError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;31m# Child process not yet created. 
See #1731717\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "],"ename":"KeyboardInterrupt","evalue":"","output_type":"error"},{"name":"stdout","text":"^C\nInterrupted with signal 2 in \n","output_type":"stream"}]},{"cell_type":"code","source":"#zip_clear()","metadata":{"execution":{"iopub.status.busy":"2023-04-21T13:14:56.471916Z","iopub.status.idle":"2023-04-21T13:14:56.478664Z","shell.execute_reply.started":"2023-04-21T13:14:56.478316Z","shell.execute_reply":"2023-04-21T13:14:56.478348Z"},"trusted":true},"execution_count":null,"outputs":[]}]}