ACCA225 committed
Commit 574459f
1 parent: 0fc48f7

Upload kaggle-lora-trainV1.0.ipynb

Lora训练用-CHRISTINA改/kaggle-lora-trainV1.0.ipynb ADDED
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"pygments_lexer":"ipython3","nbconvert_exporter":"python","version":"3.6.4","file_extension":".py","codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python"}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"#开启图形化配置参数\nuse_gui = 1\n\n#一键倒入炼丹参数\nconfig_file='/kaggle/input/input-qin/qin.toml'\n\n#炼丹参数配置,如果使用gui或一键导入炼丹参数,请忽略以下\npretrained_model = \"/kaggle/input/cetus-mix/cetusMix.safetensors\"\nis_v2_model = 0\nparameterization = 0\ntrain_data_dir = \"/kaggle/input/input-qin/input_qin\"\nreg_data_dir = \"\"\nnetwork_module = \"networks.lora\"\nnetwork_weights = \"\"\nnetwork_dim = 32\nnetwork_alpha = 32\nresolution = \"512,768\"\nbatch_size = 1\nmax_train_epoches = 10\nsave_every_n_epochs = 2\ntrain_unet_only = 0\ntrain_text_encoder_only = 0\nstop_text_encoder_training = 0\nnoise_offset = \"0\"\nkeep_tokens = 0\nmin_snr_gamma = 0\nlr = \"1e-4\"\nunet_lr = \"1e-4\"\ntext_encoder_lr = \"1e-5\"\nlr_scheduler = \"cosine_with_restarts\"\nlr_warmup_steps = 0\nlr_restart_cycles = 1\noutput_name = \"qin\"\nsave_model_as = \"safetensors\"\nsave_state = 0\nresume = \"\"\nmin_bucket_reso = 256\nmax_bucket_reso = 1024\npersistent_data_loader_workers = 0\nclip_skip = 2\n\n# Optimizer settings\noptimizer_type = \"AdamW8bit\" # Optimizer type | Available options: AdamW AdamW8bit Lion SGDNesterov SGDNesterov8bit DAdaptation AdaFactor\n\n# LyCORIS training settings\nalgo = \"lora\" # LyCORIS network algo | Available options: lora, loha, lokr, ia3, dylora. lora is equivalent to locon\nconv_dim = 4 # conv dim | Similar to network_dim, recommended to be 4\nconv_alpha = 4 # conv alpha | Similar to network_alpha, can use same or smaller values than conv_dim\ndropout = \"0\" # dropout | Dropout probability, 0 means no dropout, larger values mean more dropout. 
LoHa/LoKr/(IA)^3 do not currently support dropout","metadata":{"execution":{"iopub.status.busy":"2023-04-26T10:15:31.125957Z","iopub.execute_input":"2023-04-26T10:15:31.126528Z","iopub.status.idle":"2023-04-26T10:15:31.136863Z","shell.execute_reply.started":"2023-04-26T10:15:31.126473Z","shell.execute_reply":"2023-04-26T10:15:31.135626Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"#开启第二个炼丹炉\nuse2 = 0\n\n#开启图形化配置参数\nuse_gui1 = 1\n\n#使用config一键倒入炼丹参数\nconfig_file1='/kaggle/input/input-qin/qin.toml'\n\n#炼丹参数配置,如果配置了config,请忽略以下\npretrained_model1 = \"/kaggle/input/cetus-mix/cetusMix.safetensors\"\nis_v2_model1 = 0\nparameterization1 = 0\ntrain_data_dir1 = \"/kaggle/input/input-qin/input_qin\"\nreg_data_dir1 = \"\"\nnetwork_module1 = \"networks.lora\"\nnetwork_weights1 = \"\"\nnetwork_dim1 = 32\nnetwork_alpha1 = 32\nresolution1 = \"512,768\"\nbatch_size1 = 1\nmax_train_epoches1 = 10\nsave_every_n_epochs1 = 2\ntrain_unet_only1 = 0\ntrain_text_encoder_only1 = 0\nstop_text_encoder_training1 = 0\nnoise_offset1 = \"0\"\nkeep_tokens1 = 0\nmin_snr_gamma1 = 0\nlr1 = \"1e-4\"\nunet_lr1 = \"1e-4\"\ntext_encoder_lr1 = \"1e-5\"\nlr_scheduler1 = \"cosine_with_restarts\"\nlr_warmup_steps1 = 0\nlr_restart_cycles1 = 1\noutput_name1 = \"qin1\"\nsave_model_as1 = \"safetensors\"\nsave_state1 = 0\nresume1 = \"\"\nmin_bucket_reso1 = 256\nmax_bucket_reso1 = 1024\npersistent_data_loader_workers1 = 0\nclip_skip1 = 2\n\n# Optimizer settings\noptimizer_type1 = \"AdamW8bit\" # Optimizer type | Available options: AdamW AdamW8bit Lion SGDNesterov SGDNesterov8bit DAdaptation AdaFactor\n\n# LyCORIS training settings\nalgo1 = \"lora\" # LyCORIS network algo | Available options: lora, loha, lokr, ia3, dylora. lora is equivalent to locon\nconv_dim1 = 4 # conv dim | Similar to network_dim, recommended to be 4\nconv_alpha1 = 4 # conv alpha | Similar to network_alpha, can use same or smaller values than conv_dim\ndropout1 = \"0\" # dropout | Dropout probability, 0 means no dropout, larger values mean more dropout. 
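Since the one-click import path hands config_file straight to sd-scripts' --config_file, the TOML keys are expected to mirror the train_network.py argument names. The actual qin.toml is not part of this commit, so the snippet below is only a hypothetical sketch of how such a file could be generated; the key names and values are assumptions based on the parameters above, and it assumes the toml package is installed.

# Hypothetical sketch (not part of the original notebook): write a config TOML
# of the kind config_file is assumed to point to. Keys mirror train_network.py
# argument names; values are illustrative only.
import toml  # may require: pip install toml

example_config = {
    "pretrained_model_name_or_path": "/kaggle/input/cetus-mix/cetusMix.safetensors",
    "train_data_dir": "/kaggle/input/input-qin/input_qin",
    "output_dir": "/kaggle/working/output",
    "output_name": "qin",
    "network_module": "networks.lora",
    "network_dim": 32,
    "network_alpha": 32,
    "resolution": "512,768",
    "train_batch_size": 1,
    "max_train_epochs": 10,
    "learning_rate": 1e-4,
    "optimizer_type": "AdamW8bit",
    "mixed_precision": "fp16",
    "save_model_as": "safetensors",
    "xformers": True,
}

with open("/kaggle/working/example.toml", "w") as f:
    toml.dump(example_config, f)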
# %% Cell 3: install path, ngrok tunnels, and packaging/upload options

# Install directory
install_path = '/kaggle/working/'

# Tunnelling to the public internet
useNgrok = 1  # Optional: whether to use ngrok to expose services publicly
# Tunnel for TensorBoard
ngrokTokenFile = '/kaggle/input/ngroktoken/Authtoken.txt'  # Optional: path to the file holding the ngrok token
port = 6006
# Tunnel for the first run's GUI
ngrokTokenFile1 = '/kaggle/input/ngroktoken/Authtoken1.txt'  # Optional: path to the file holding the ngrok token
port1 = 28000
# Tunnel for the second run's GUI
ngrokTokenFile2 = '/kaggle/input/ngroktoken/Authtoken2.txt'  # Optional: path to the file holding the ngrok token
port2 = 28001

# Optimization
multi_gpu = 0  # Of little practical use; not recommended, it tends to slow training down
lowram = 0
cpu_threads = 0

# Reinstall from scratch
reLoad = 0

# Automatically zip the output
zip_output = 1
# Automatically clean the output directories
clear_output = 1
# Automatically upload to Hugging Face
upload = 1
hugTokenFile = '/kaggle/input/hugfacetoken/hugfacetoken.txt'  # Optional: path to the file holding the Hugging Face token
# Repository name
repo_id = "sukaka/train_model"
# Upload path inside the repository
path_in_repo = "模型.zip"

# %% Cell 4: optional cleanup of the working directory
#!rm -rf /kaggle/working/
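The second-run and multi-GPU switches above (use2, multi_gpu, and the --gpu_ids 0,1 used later) assume Kaggle's dual-T4 accelerator. A quick check that both GPUs are actually visible, not part of the original notebook, could look like this:

# Optional sanity check (not in the original notebook): list the visible GPUs
# before enabling use2 or multi_gpu. Expects Kaggle's "GPU T4 x2" accelerator.
import subprocess
print(subprocess.run("nvidia-smi -L", shell=True, capture_output=True, text=True).stdout)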
print(\"Training is already running / 已有正在进行的训练\")\n return {\"status\": \"fail\", \"detail\": \"Training is already running\"}\n\n timestamp = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n toml_file = f\"toml/{timestamp}.toml\"\n toml_data = await request.body()\n j = json.loads(toml_data.decode(\"utf-8\"))\n with open(toml_file, \"w\") as f:\n f.write(toml.dumps(j))\n background_tasks.add_task(run_train, toml_file)\n return {\"status\": \"success\"}\n\n @app.middleware(\"http\")\n async def add_cache_control_header(request, call_next):\n response = await call_next(request)\n response.headers[\"Cache-Control\"] = \"max-age=0\"\n return response\n\n @app.get(\"/\")\n async def index():\n return FileResponse(\"./frontend/dist/index.html\")\n\n\n app.mount(\"/\", StaticFiles(directory=\"frontend/dist\"), name=\"static\")\n\n if __name__ == \"__main__\":\n args, _ = parser.parse_known_args()\n print(f\"Server started at http://127.0.0.1:{args.port}\")\n if sys.platform == \"win32\":\n # disable triton on windows\n os.environ[\"XFORMERS_FORCE_DISABLE_TRITON\"] = \"1\"\n\n webbrowser.open(f\"http://127.0.0.1:{args.port}\")\n uvicorn.run(app, host=\"127.0.0.1\", port=args.port, log_level=\"error\")\n\ndef hugface_upload():\n if Path(hugTokenFile).exists():\n with open(hugTokenFile,encoding = \"utf-8\") as nkfile:\n hugToken = nkfile.readline()\n if upload == 1 and hugToken != '':\n from huggingface_hub import login\n # 使用您的 Hugging Face 访问令牌登录\n login(token= hugToken)\n from huggingface_hub import HfApi\n # 实例化 HfApi 类\n api = HfApi()\n print(\"HfApi 类已实例化\")\n\n # 使用 upload_file() 函数上传文件\n print(\"开始上传文件...\")\n response = api.upload_file(\n path_or_fileobj=\"/kaggle/working/模型.zip\",\n path_in_repo={path_in_repo},\n repo_id={repo_id},\n repo_type=\"dataset\"\n )\n print(\"文件上传完成\")\n print(f\"响应: {response}\")\n\ndef zip_clear():\n if zip_output == 1:\n shutil.make_archive('/kaggle/working/模型', 'zip', '/kaggle/working/output/')\n print('模型已压缩到output')\n if clear_output == 1:\n def clear_directory(folder_path):\n for filename in os.listdir(folder_path):\n file_path = os.path.join(folder_path, filename)\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n clear_directory('/kaggle/working/output')\n clear_directory('/kaggle/working/lora-scripts/logs')\n print('清理完毕')\n \ndef libtcmalloc():\n !apt-get remove google-perftools -y\n %cd /kaggle\n !mkdir temp\n %cd temp\n !wget -qq --show-progres http://launchpadlibrarian.net/367274644/libgoogle-perftools-dev_2.5-2.2ubuntu3_amd64.deb\n !wget -qq --show-progres https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/google-perftools_2.5-2.2ubuntu3_all.deb\n !wget -qq --show-progres https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/libtcmalloc-minimal4_2.5-2.2ubuntu3_amd64.deb\n !wget -qq --show-progres https://launchpad.net/ubuntu/+source/google-perftools/2.5-2.2ubuntu3/+build/14795286/+files/libgoogle-perftools4_2.5-2.2ubuntu3_amd64.deb\n !apt install -qq libunwind8-dev -y\n !dpkg -i *.deb\n !rm *.deb\n os.environ[\"LD_PRELOAD\"] = \"libtcmalloc.so\"\n \ndef LoRA_scripts_install():\n %cd $install_path\n if reLoad == 1:\n !rm -rf lora-scripts\n if Path(\"lora-scripts\").exists():\n print('LoRA-scripts已安装')\n else:\n print('LoRA-scripts安装中')\n #下载秋叶的脚本\n !git clone --recurse-submodules https://github.com/Akegarasu/lora-scripts\n\ndef venv_install():\n %cd /opt/conda/envs\n if 
os.path.exists('venv'):\n print('环境已安装')\n else:\n %cd /kaggle/working/\n if not os.path.exists('venv.tar.gz'):\n print('环境包下载中')\n !wget https://huggingface.co/datasets/sukaka/venv_ai_drow/resolve/main/lora_train_py31010_torch2_cu118_xf17.tar.gz -O venv.tar.gz\n print('环境包已下载')\n %cd /opt/conda/envs/\n !mkdir venv\n %cd venv\n print('环境安装中')\n !tar -xzf /kaggle/working/venv.tar.gz\n !source /opt/conda/bin/activate venv\n print('环境安装完毕')\n \ndef ngrok_start(ngrokTokenFile: str, port: int, address_name: str):\n if Path(ngrokTokenFile).exists():\n with open(ngrokTokenFile, encoding=\"utf-8\") as nkfile:\n ngrokToken = nkfile.readline()\n print('use nrgok')\n from pyngrok import conf, ngrok\n conf.get_default().auth_token = ngrokToken\n conf.get_default().monitor_thread = False\n ssh_tunnels = ngrok.get_tunnels(conf.get_default())\n if len(ssh_tunnels) == 0:\n ssh_tunnel = ngrok.connect(port, bind_tls=True)\n print(f'{address_name}:' + ssh_tunnel.public_url)\n else:\n print(f'{address_name}:' + ssh_tunnels[0].public_url)\n else:\n print('skip start ngrok')\n \ndef tensorboard_start():\n ngrok_start(ngrokTokenFile,port,'tensorboard地址')\n %cd $install_path/lora-scripts\n !conda run -n venv --no-capture-output tensorboard --logdir='logs'\n\ndef train_start():\n %cd /kaggle/working\n !mkdir output\n %cd $install_path/lora-scripts\n\n ext_args = []\n launch_args = []\n if use2 == 1:\n os.environ['CUDA_VISIBLE_DEVICE'] = '0'\n launch_args.append('--gpu_ids 0')\n else:\n os.environ['CUDA_VISIBLE_DEVICE'] = '0,1'\n if multi_gpu == 1:\n launch_args.append('--multi_gpu')\n launch_args.append('--gpu_ids 0,1')\n #launch_args.append('--num_processes 2')\n if cpu_threads == 1:\n launch_args.append('--num_cpu_threads_per_process=4')\n if use_gui == 1:\n ngrok_start(ngrokTokenFile1,port1,'第一个炼丹炉')\n guy(0,port1)\n else:\n if Path(config_file).is_file():\n cmd = f\"\"\"conda run -n venv --no-capture-output accelerate launch {' '.join(launch_args)} \"sd-scripts/train_network.py\" \\\n --config_file={config_file}\"\"\"\n os.system(cmd)\n else:\n if lowram == 1:\n ext_args.append('--lowram')\n\n if is_v2_model == 1:\n ext_args.append('--v2')\n else:\n ext_args.append(f'--clip_skip {clip_skip}')\n\n if parameterization == 1:\n ext_args.append('--v_parameterization')\n\n if train_unet_only == 1:\n ext_args.append('--network_train_unet_only')\n\n if train_text_encoder_only == 1:\n ext_args.append('--network_train_text_encoder_only')\n\n if network_weights:\n ext_args.append(f'--network_weights {network_weights}')\n\n if reg_data_dir:\n ext_args.append(f'--reg_data_dir {reg_data_dir}')\n\n if optimizer_type:\n ext_args.append(f'--optimizer_type {optimizer_type}')\n\n if optimizer_type == 'DAdaptation':\n ext_args.append('--optimizer_args decouple=True')\n\n if save_state == 1:\n ext_args.append('--save_state')\n\n if resume:\n ext_args.append(f'--resume {resume}')\n\n if persistent_data_loader_workers == 1:\n ext_args.append('--persistent_data_loader_workers')\n\n if network_module == 'lycoris.kohya':\n ext_args.append(f'--network_args conv_dim={conv_dim} conv_alpha={conv_alpha} algo={algo} dropout={dropout}')\n\n if stop_text_encoder_training != 0:\n ext_args.append(f'--stop_text_encoder_training {stop_text_encoder_training}')\n\n if noise_offset != '0':\n ext_args.append(f'--noise_offset {noise_offset}')\n\n if min_snr_gamma != 0:\n ext_args.append(f'--min_snr_gamma {min_snr_gamma}')\n\n cmd = f\"\"\"conda run -n venv --no-capture-output accelerate launch {' '.join(launch_args)} \"sd-scripts/train_network.py\" \\\n 
--enable_bucket \\\n --pretrained_model_name_or_path={pretrained_model} \\\n --train_data_dir={train_data_dir} \\\n --output_dir=\"/kaggle/working/output\" \\\n --logging_dir=\"logs\" \\\n --log_prefix={output_name} \\\n --resolution={resolution} \\\n --network_module={network_module} \\\n --max_train_epochs={max_train_epoches} \\\n --learning_rate={lr} \\\n --unet_lr={unet_lr} \\\n --text_encoder_lr={text_encoder_lr} \\\n --lr_scheduler={lr_scheduler} \\\n --lr_warmup_steps={lr_warmup_steps} \\\n --lr_scheduler_num_cycles={lr_restart_cycles} \\\n --network_dim={network_dim} \\\n --network_alpha={network_alpha} \\\n --output_name={output_name} \\\n --train_batch_size={batch_size} \\\n --save_every_n_epochs={save_every_n_epochs} \\\n --mixed_precision=\"fp16\" \\\n --save_precision=\"fp16\" \\\n --seed=\"1337\" \\\n --cache_latents \\\n --prior_loss_weight=1 \\\n --max_token_length=225 \\\n --caption_extension=\".txt\" \\\n --save_model_as={save_model_as} \\\n --min_bucket_reso={min_bucket_reso} \\\n --max_bucket_reso={max_bucket_reso} \\\n --keep_tokens={keep_tokens} \\\n --xformers --shuffle_caption {' '.join(ext_args)} \"\"\"\n os.system(cmd)\n\ndef train_start1():\n %cd /kaggle/working\n !mkdir output\n %cd $install_path/lora-scripts\n\n ext_args = []\n launch_args = []\n \n launch_args.append('--gpu_ids 1')\n \n if cpu_threads == 1:\n launch_args.append('--num_cpu_threads_per_process=8')\n if use_gui1 == 1:\n ngrok_start(ngrokTokenFile2,port2,'第二个炼丹炉')\n guy(1,port2)\n else:\n if Path(config_file1).is_file():\n cmd = f\"\"\"conda run -n venv --no-capture-output accelerate launch {' '.join(launch_args)} \"sd-scripts/train_network.py\" \\\n --config_file={config_file1}\"\"\"\n os.system(cmd)\n else:\n if lowram == 1:\n ext_args.append('--lowram')\n\n if is_v2_model1 == 1:\n ext_args.append('--v2')\n else:\n ext_args.append(f'--clip_skip {clip_skip1}')\n\n if parameterization1 == 1:\n ext_args.append('--v_parameterization')\n\n if train_unet_only1 == 1:\n ext_args.append('--network_train_unet_only')\n\n if train_text_encoder_only1 == 1:\n ext_args.append('--network_train_text_encoder_only')\n\n if network_weights1:\n ext_args.append(f'--network_weights {network_weights1}')\n\n if reg_data_dir1:\n ext_args.append(f'--reg_data_dir {reg_data_dir1}')\n\n if optimizer_type1:\n ext_args.append(f'--optimizer_type {optimizer_type1}')\n\n if optimizer_type1 == 'DAdaptation':\n ext_args.append('--optimizer_args decouple=True')\n\n if save_state1 == 1:\n ext_args.append('--save_state')\n\n if resume1:\n ext_args.append(f'--resume {resume1}')\n\n if persistent_data_loader_workers1 == 1:\n ext_args.append('--persistent_data_loader_workers')\n\n if network_module1 == 'lycoris.kohya':\n ext_args.append(f'--network_args conv_dim={conv_dim1} conv_alpha={conv_alpha1} algo={algo1} dropout={dropout1}')\n\n if stop_text_encoder_training1 != 0:\n ext_args.append(f'--stop_text_encoder_training {stop_text_encoder_training1}')\n\n if noise_offset1 != '0':\n ext_args.append(f'--noise_offset {noise_offset1}')\n\n if min_snr_gamma1 != 0:\n ext_args.append(f'--min_snr_gamma {min_snr_gamma1}')\n\n cmd = f\"\"\"conda run -n venv --no-capture-output accelerate launch {' '.join(launch_args)} \"sd-scripts/train_network.py\" \\\n --enable_bucket \\\n --pretrained_model_name_or_path={pretrained_model1} \\\n --train_data_dir={train_data_dir1} \\\n --output_dir=\"/kaggle/working/output\" \\\n --logging_dir=\"logs\" \\\n --log_prefix={output_name1} \\\n --resolution={resolution1} \\\n --network_module={network_module1} \\\n 
--max_train_epochs={max_train_epoches1} \\\n --learning_rate={lr1} \\\n --unet_lr={unet_lr1} \\\n --text_encoder_lr={text_encoder_lr1} \\\n --lr_scheduler={lr_scheduler1} \\\n --lr_warmup_steps={lr_warmup_steps1} \\\n --lr_scheduler_num_cycles={lr_restart_cycles1} \\\n --network_dim={network_dim1} \\\n --network_alpha={network_alpha1} \\\n --output_name={output_name1} \\\n --train_batch_size={batch_size1} \\\n --save_every_n_epochs={save_every_n_epochs1} \\\n --mixed_precision=\"fp16\" \\\n --save_precision=\"fp16\" \\\n --seed=\"1337\" \\\n --cache_latents \\\n --prior_loss_weight=1 \\\n --max_token_length=225 \\\n --caption_extension=\".txt\" \\\n --save_model_as={save_model_as1} \\\n --min_bucket_reso={min_bucket_reso1} \\\n --max_bucket_reso={max_bucket_reso1} \\\n --keep_tokens={keep_tokens1} \\\n --xformers --shuffle_caption {' '.join(ext_args)} \"\"\"\n os.system(cmd)\n \ndef ready_train():\n !pip install huggingface_hub\n !pip install pyngrok\n LoRA_scripts_install()\n\ndef after_train():\n zip_clear()\n hugface_upload()\n \ndef main():\n startTicks = time.time()\n isInstall = True if os.getenv('IsInstall','False') == 'True' else False\n if isInstall is False or reLoad == 1:\n libtcmalloc()\n if __name__ == '__main__':\n t1 = multiprocessing.Process(target=venv_install)\n t2 = multiprocessing.Process(target=ready_train)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n isInstall = True\n os.environ['IsInstall'] = 'True'\n ticks = time.time()\n print(\"加载耗时:\",(ticks - startTicks),\"s\")\n gc.collect()\n if use2 == 1:\n if __name__ == '__main__':\n p1 = multiprocessing.Process(target=tensorboard_start)\n p2 = multiprocessing.Process(target=train_start)\n p3 = multiprocessing.Process(target=train_start1)\n p1.start()\n p2.start()\n p3.start()\n p1.join()\n p2.join()\n p3.join()\n else:\n if __name__ == '__main__':\n p1 = multiprocessing.Process(target=tensorboard_start)\n p2 = multiprocessing.Process(target=train_start)\n p1.start()\n p2.start()\n p1.join()\n p2.join()\n ","metadata":{"execution":{"iopub.status.busy":"2023-04-26T10:15:31.278289Z","iopub.execute_input":"2023-04-26T10:15:31.278571Z","iopub.status.idle":"2023-04-26T10:15:32.377595Z","shell.execute_reply.started":"2023-04-26T10:15:31.278545Z","shell.execute_reply":"2023-04-26T10:15:32.376424Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"main()","metadata":{"execution":{"iopub.status.busy":"2023-04-26T10:15:32.380031Z","iopub.execute_input":"2023-04-26T10:15:32.380401Z","iopub.status.idle":"2023-04-26T10:24:22.221180Z","shell.execute_reply.started":"2023-04-26T10:15:32.380365Z","shell.execute_reply":"2023-04-26T10:24:22.210371Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"#训练完毕,单独执行。进行打包和上传\nafter_train()","metadata":{"execution":{"iopub.status.busy":"2023-04-26T10:24:22.226751Z","iopub.status.idle":"2023-04-26T10:24:22.229381Z","shell.execute_reply.started":"2023-04-26T10:24:22.229070Z","shell.execute_reply":"2023-04-26T10:24:22.229102Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"#!rm -rf /kaggle/working/lora-scripts/output","metadata":{"execution":{"iopub.status.busy":"2023-04-26T10:24:35.362462Z","iopub.execute_input":"2023-04-26T10:24:35.363150Z","iopub.status.idle":"2023-04-26T10:24:36.396697Z","shell.execute_reply.started":"2023-04-26T10:24:35.363104Z","shell.execute_reply":"2023-04-26T10:24:36.395312Z"},"trusted":true},"execution_count":null,"outputs":[]}]}
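When the GUI path is used, training is driven through the FastAPI server started by guy(): it accepts a JSON body on POST /api/run, writes it out as a TOML file, and launches sd-scripts/train_network.py with it. Below is a minimal, hypothetical client sketch; the port matches the port1 default above, the URL could equally be the ngrok address printed for the GUI, and the payload keys are assumed to follow the train_network.py argument names.

# Hypothetical usage sketch (not part of the original notebook): submit a training
# job to the GUI server started by guy(0, port1). Assumes it is reachable at
# 127.0.0.1:28000 (or via the printed ngrok URL) and accepts train_network.py keys.
import requests

payload = {
    "pretrained_model_name_or_path": "/kaggle/input/cetus-mix/cetusMix.safetensors",
    "train_data_dir": "/kaggle/input/input-qin/input_qin",
    "output_dir": "/kaggle/working/output",
    "output_name": "qin",
    "network_module": "networks.lora",
}
resp = requests.post("http://127.0.0.1:28000/api/run", json=payload)
print(resp.json())  # {"status": "success"} if no other training is running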