ashleykleynhans and apollo812 committed
Commit d9c13c2
0 Parent(s)

Duplicate from apollo812/RNPD_SD


Co-authored-by: Good Luck <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
Notebooks.txt ADDED
@@ -0,0 +1,2 @@
+ https://huggingface.co/datasets/apollo812/RNPD_SD/raw/main/Notebooks/RNPD-SD.ipynb
+ https://huggingface.co/datasets/apollo812/RNPD_SD/resolve/main/Notebooks/SDXL-LoRA-RNPD.ipynb
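These are the raw links the pod pulls on startup; the `ntbks()` helper in Scripts/runpodLoRA.py below fetches such a list with `wget -q -i Notebooks.txt`. A minimal pure-Python sketch of the same fetch (the `Latest_Notebooks` target directory mirrors the helper's choice):

import os
import urllib.request

urls = [
    "https://huggingface.co/datasets/apollo812/RNPD_SD/raw/main/Notebooks/RNPD-SD.ipynb",
    "https://huggingface.co/datasets/apollo812/RNPD_SD/resolve/main/Notebooks/SDXL-LoRA-RNPD.ipynb",
]

os.makedirs("Latest_Notebooks", exist_ok=True)
for url in urls:
    # save each notebook under its basename, as wget would
    urllib.request.urlretrieve(url, os.path.join("Latest_Notebooks", os.path.basename(url)))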
Notebooks/RNPD-SD.ipynb ADDED
@@ -0,0 +1,162 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Install the dependencies\n",
+ "\n",
+ "force_reinstall= False\n",
+ "\n",
+ "# Set to True only if you want to install the dependencies again.\n",
+ "\n",
+ "#--------------------\n",
+ "with open('/dev/null', 'w') as devnull:import requests, os, time, importlib;open('/workspace/mainrunpodA1111.py', 'wb').write(requests.get('https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Scripts/mainrunpodA1111.py').content);os.chdir('/workspace');time.sleep(2);import mainrunpodA1111;importlib.reload(mainrunpodA1111);from mainrunpodA1111 import *;Deps(force_reinstall)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Install/Update AUTOMATIC1111 repo"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Huggingface_token_optional=\"\"\n",
+ "\n",
+ "# Restore your backed-up SD folder by entering your huggingface token; leave it empty to start fresh or continue with the existing sd folder (if any).\n",
+ "\n",
+ "#--------------------\n",
+ "repo(Huggingface_token_optional)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Model Download/Load"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Original_Model_Version = \"SDXL\"\n",
+ "\n",
+ "# Choices are \"SDXL\", \"v1.5\", \"v2-512\", \"v2-768\"\n",
+ "\n",
+ "#-------------- Or\n",
+ "\n",
+ "Path_to_MODEL = \"\"\n",
+ "\n",
+ "# Insert the full path of your trained model or of a folder containing multiple models.\n",
+ "\n",
+ "\n",
+ "MODEL_LINK = \"\"\n",
+ "\n",
+ "# A direct link to a Model or a shared gdrive link.\n",
+ "\n",
+ "\n",
+ "#--------------------\n",
+ "model=mdl(Original_Model_Version, Path_to_MODEL, MODEL_LINK)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# ControlNet"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Download/update ControlNet extension and its models.\n",
+ "\n",
+ "ControlNet_v1_Model = \"all\"\n",
+ "\n",
+ "# Choices are : none; all; 1: Canny; 2: Depth; 3: Lineart; 4: MLSD; 5: Normal; 6: OpenPose; 7: Scribble; 8: Seg; 9: ip2p; 10: Shuffle; 11: Inpaint; 12: Softedge; 13: Lineart_Anime; 14: Tile; 15: T2iadapter_Models\n",
+ "\n",
+ "ControlNet_XL_Model = \"all\"\n",
+ "\n",
+ "# Choices are : none; all; 1: Canny; 2: Depth; 3: Sketch; 4: OpenPose; 5: Recolor\n",
+ "\n",
+ "#--------------------\n",
+ "CNet(ControlNet_v1_Model, ControlNet_XL_Model)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Start Stable-Diffusion"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "User = \"\"\n",
+ "\n",
+ "Password= \"\"\n",
+ "\n",
+ "# Add credentials to your Gradio interface (optional).\n",
+ "\n",
+ "#-----------------\n",
+ "configf=sd(User, Password, model) if 'model' in locals() else sd(User, Password, \"\");import gradio;gradio.close_all()\n",
+ "!python /workspace/sd/stable-diffusion-webui/webui.py $configf"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Backup SD folder"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# This will back up your sd folder -without the models- to your huggingface account, so you can restore it whenever you start an instance.\n",
+ "\n",
+ "Huggingface_Write_token=\"\"\n",
+ "\n",
+ "# Must be a WRITE token, get yours here : https://huggingface.co/settings/tokens\n",
+ "\n",
+ "#--------------------\n",
+ "save(Huggingface_Write_token)"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
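The final cell delegates to `save()` from mainrunpodA1111.py, which is not part of this commit. Based on the restore path used by `test()` in Scripts/runpodLoRA.py (a `sd_backup_rnpd.tar.zst` file in a `{username}/fast-stable-diffusion` dataset repo), a hedged sketch of that backup pattern could look like this; the tar exclusions are an assumption:

import subprocess
from huggingface_hub import HfApi, create_repo

def backup_sd(write_token):
    # Hypothetical helper mirroring what save() presumably does; the repo id and
    # archive name match the restore URL in runpodLoRA.py's test(), the rest is assumed.
    username = HfApi().whoami(write_token)["name"]
    repo_id = f"{username}/fast-stable-diffusion"
    create_repo(repo_id, repo_type="dataset", token=write_token, exist_ok=True)
    # archive the sd folder; excluding model weights is an assumption ("without the models")
    subprocess.run("tar --zstd -cf sd_backup_rnpd.tar.zst --exclude='*.safetensors' --exclude='*.ckpt' sd",
                   shell=True, check=True, cwd="/workspace")
    HfApi().upload_file(path_or_fileobj="/workspace/sd_backup_rnpd.tar.zst",
                        path_in_repo="sd_backup_rnpd.tar.zst",
                        repo_id=repo_id, repo_type="dataset", token=write_token)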
Notebooks/SDXL-LoRA-RNPD.ipynb ADDED
@@ -0,0 +1,279 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Install the dependencies\n",
+ "\n",
+ "force_reinstall= False\n",
+ "\n",
+ "# Set to True only if you want to install the dependencies again.\n",
+ "\n",
+ "#--------------------\n",
+ "with open('/dev/null', 'w') as devnull:import requests, os, time, importlib;open('/workspace/sdxllorarunpod.py', 'wb').write(requests.get('https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Scripts/sdxllorarunpod.py').content);os.chdir('/workspace');import sdxllorarunpod;importlib.reload(sdxllorarunpod);from sdxllorarunpod import *;restored=False;restoreda=False;Deps(force_reinstall)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Download the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Run the cell to download the model\n",
+ "\n",
+ "#-------------\n",
+ "MODEL_NAMExl=dls_xlf(\"\", \"\", \"\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Create/Load a Session"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Session_Name = \"Example-Session\"\n",
+ "\n",
+ "# Enter the session name; if it exists, it will be loaded, otherwise a new session will be created.\n",
+ "\n",
+ "#-----------------\n",
+ "[WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl]=sess_xl(Session_Name, MODEL_NAMExl if 'MODEL_NAMExl' in locals() else \"\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Instance Images\n",
+ "The most important step is to rename the instance pictures with a single unique, unknown identifier"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Remove_existing_instance_images= True\n",
+ "\n",
+ "# Set to False to keep the existing instance images if any.\n",
+ "\n",
+ "\n",
+ "IMAGES_FOLDER_OPTIONAL= \"\"\n",
+ "\n",
+ "# If you prefer to specify the pictures folder directly instead of uploading, this will add the pictures to the existing (if any) instance images. Leave EMPTY to upload.\n",
+ "\n",
+ "\n",
+ "Smart_crop_images = True\n",
+ "\n",
+ "# Automatically crop your input images.\n",
+ "\n",
+ "Crop_size = 1024\n",
+ "\n",
+ "# 1024 is the native resolution\n",
+ "\n",
+ "\n",
+ "#--------------------------------------------\n",
+ "\n",
+ "# Disabled when \"Smart_crop_images\" is set to \"True\"\n",
+ "\n",
+ "Resize_to_1024_and_keep_aspect_ratio = False\n",
+ "\n",
+ "# Will resize the smallest dimension to 1024 without cropping while keeping the aspect ratio (make sure you have enough VRAM)\n",
+ "\n",
+ "\n",
+ "# Check out this example for naming : https://i.imgur.com/d2lD3rz.jpeg\n",
+ "\n",
+ "#-----------------\n",
+ "uplder(Remove_existing_instance_images, Smart_crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Manual Captioning"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Open a tool to manually caption the instance images.\n",
+ "\n",
+ "#-----------------\n",
+ "caption(CAPTIONS_DIR, INSTANCE_DIR)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Train LoRA"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Training Settings\n",
+ "\n",
+ "# Epoch = Number of steps/images\n",
+ "\n",
+ "\n",
+ "UNet_Training_Epochs= 120\n",
+ "\n",
+ "UNet_Learning_Rate= \"1e-6\"\n",
+ "\n",
+ "# Keep the learning rate between 1e-6 and 3e-6\n",
+ "\n",
+ "\n",
+ "Text_Encoder_Training_Epochs= 40\n",
+ "\n",
+ "# The training is highly affected by this value; a total of 300 steps (not epochs) is enough; set to 0 if enhancing existing concepts\n",
+ "\n",
+ "Text_Encoder_Learning_Rate= \"1e-6\"\n",
+ "\n",
+ "# Keep the learning rate at 1e-6 or lower\n",
+ "\n",
+ "\n",
+ "External_Captions= False\n",
+ "\n",
+ "# Load the captions from a text file for each instance image\n",
+ "\n",
+ "\n",
+ "LoRA_Dim = 64\n",
+ "\n",
+ "# Dimension of the LoRA model; between 64 and 128 is good enough\n",
+ "\n",
+ "\n",
+ "Save_VRAM = False\n",
+ "\n",
+ "# Use as low as 10GB VRAM with Dim = 64\n",
+ "\n",
+ "\n",
+ "Intermediary_Save_Epoch = \"[30,60]\"\n",
+ "\n",
+ "# [30,60] means it will save intermediary models at epoch 30 and epoch 60; you can add as many as you want, like [30,60,80,100]\n",
+ "\n",
+ "\n",
+ "#-----------------\n",
+ "dbtrainxl(UNet_Training_Epochs, Text_Encoder_Training_Epochs, UNet_Learning_Rate, Text_Encoder_Learning_Rate, LoRA_Dim, False, 1024, MODEL_NAMExl, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, 0, Save_VRAM, Intermediary_Save_Epoch)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Test the Trained Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# ComfyUI"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Args=\"--listen --port 3000 --preview-method auto\"\n",
+ "\n",
+ "\n",
+ "Huggingface_token_optional= \"\"\n",
+ "\n",
+ "# Restore your backed-up Comfy folder by entering your huggingface token; leave it empty to start fresh or continue with the existing sd folder (if any).\n",
+ "\n",
+ "#--------------------\n",
+ "restored=sdcmff(Huggingface_token_optional, MDLPTH, restored)\n",
+ "!python /workspace/ComfyUI/main.py $Args"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# A1111"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "User = \"\"\n",
+ "\n",
+ "Password= \"\"\n",
+ "\n",
+ "# Add credentials to your Gradio interface (optional).\n",
+ "\n",
+ "\n",
+ "Huggingface_token_optional= \"\"\n",
+ "\n",
+ "# Restore your backed-up SD folder by entering your huggingface token; leave it empty to start fresh or continue with the existing sd folder (if any).\n",
+ "\n",
+ "#-----------------\n",
+ "configf, restoreda=test(MDLPTH, User, Password, Huggingface_token_optional, restoreda)\n",
+ "!python /workspace/sd/stable-diffusion-webui/webui.py $configf"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Free up space"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Display a list of sessions from which you can remove any session you don't need anymore\n",
+ "\n",
+ "#-------------------------\n",
+ "clean()"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
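The training cell's comments equate one epoch with one pass over the instance images ("Epoch = Number of steps/images"; the launch flags in runpodLoRA.py use a batch size of 1), so a quick sanity check of the defaults, assuming an illustrative set of 10 images:

num_images = 10                       # illustrative; use your own instance-image count

unet_steps = 120 * num_images         # UNet_Training_Epochs = 120 -> 1200 steps
text_encoder_steps = 40 * num_images  # Text_Encoder_Training_Epochs = 40 -> 400 steps

# The comments suggest ~300 text-encoder steps in total, so with 10 images
# roughly 30 epochs would hit that target:
suggested_te_epochs = 300 // num_images
print(unet_steps, text_encoder_steps, suggested_te_epochs)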
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: cc-by-nc-4.0
+ ---
Scripts/runpodLoRA.py ADDED
@@ -0,0 +1,1160 @@
+ from IPython.display import clear_output, display
+ from subprocess import call, getoutput, Popen, check_output
+ import ipywidgets as widgets
+ import io
+ from PIL import Image, ImageDraw, ImageOps
+ import fileinput
+ import time
+ import os
+ from os import listdir
+ from os.path import isfile
+ import random
+ import sys
+ from io import BytesIO
+ import requests
+ from collections import defaultdict
+ from math import log, sqrt
+ import numpy as np
+ import six
+ import base64
+ import re
+
+ from urllib.parse import urlparse, parse_qs, unquote
+ import urllib.request
+ from urllib.request import urlopen, Request
+
+ import tempfile
+ from tqdm import tqdm
+
+
+ def Deps(force_reinstall):
+
+     if not force_reinstall and os.path.exists('/usr/local/lib/python3.10/dist-packages/safetensors'):
+         ntbks()
+         call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
+         print('Modules and notebooks updated, dependencies already installed')
+         os.environ['TORCH_HOME'] = '/workspace/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+     else:
+         call('pip install --root-user-action=ignore --disable-pip-version-check --no-deps -qq gdown PyWavelets numpy==1.23.5 accelerate==0.12.0 --force-reinstall', shell=True, stdout=open('/dev/null', 'w'))
+         ntbks()
+         if os.path.exists('deps'):
+             call("rm -r deps", shell=True)
+         if os.path.exists('diffusers'):
+             call("rm -r diffusers", shell=True)
+         call('mkdir deps', shell=True)
+         if not os.path.exists('cache'):
+             call('mkdir cache', shell=True)
+         os.chdir('deps')
+         dwn("https://huggingface.co/TheLastBen/dependencies/resolve/main/rnpddeps-t2.tar.zst", "/workspace/deps/rnpddeps-t2.tar.zst", "Installing dependencies")
+         call('tar -C / --zstd -xf rnpddeps-t2.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         call("sed -i 's@~/.cache@/workspace/cache@' /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", shell=True)
+         os.chdir('/workspace')
+         call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
+         call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers", shell=True, stdout=open('/dev/null', 'w'))
+         call('pip install --root-user-action=ignore --disable-pip-version-check -qq gradio==3.41.2', shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r deps", shell=True)
+         os.chdir('/workspace')
+         os.environ['TORCH_HOME'] = '/workspace/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+         call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.10/warnings.py", shell=True)
+         clear_output()
+
+     done()
+
+
+ def dwn(url, dst, msg):
+     file_size = None
+     req = Request(url, headers={"User-Agent": "torch.hub"})
+     u = urlopen(req)
+     meta = u.info()
+     if hasattr(meta, 'getheaders'):
+         content_length = meta.getheaders("Content-Length")
+     else:
+         content_length = meta.get_all("Content-Length")
+     if content_length is not None and len(content_length) > 0:
+         file_size = int(content_length[0])
+
+     with tqdm(total=file_size, disable=False, mininterval=0.5,
+               bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
+         with open(dst, "wb") as f:
+             while True:
+                 buffer = u.read(8192)
+                 if len(buffer) == 0:
+                     break
+                 f.write(buffer)
+                 pbar.update(len(buffer))
+
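For reference, `dwn()` above streams any direct URL to disk in 8 KB chunks behind a tqdm bar sized from the Content-Length header; a typical call is the one `Deps()` makes:

# illustrative call; any direct-download URL and writable destination work
dwn("https://huggingface.co/TheLastBen/dependencies/resolve/main/rnpddeps-t2.tar.zst",
    "/workspace/deps/rnpddeps-t2.tar.zst", "Installing dependencies")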
+ def ntbks():
+
+     os.chdir('/workspace')
+     if not os.path.exists('Latest_Notebooks'):
+         call('mkdir Latest_Notebooks', shell=True)
+     else:
+         call('rm -r Latest_Notebooks', shell=True)
+         call('mkdir Latest_Notebooks', shell=True)
+     os.chdir('/workspace/Latest_Notebooks')
+     call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
+     call('rm Notebooks.txt', shell=True)
+     os.chdir('/workspace')
+
+
+ def done():
+     done = widgets.Button(
+         description='Done!',
+         disabled=True,
+         button_style='success',
+         tooltip='',
+         icon='check'
+     )
+     display(done)
+
+
+ def mdlvxl():
+
+     os.chdir('/workspace')
+
+     if os.path.exists('stable-diffusion-XL') and not os.path.exists('/workspace/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
+         call('rm -r stable-diffusion-XL', shell=True)
+     if not os.path.exists('stable-diffusion-XL'):
+         print('Downloading SDXL model...')
+         call('mkdir stable-diffusion-XL', shell=True)
+         os.chdir('stable-diffusion-XL')
+         call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         call('git remote add -f origin https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         call('echo -e "\nscheduler\ntext_encoder\ntext_encoder_2\ntokenizer\ntokenizer_2\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.bin\n!*.onnx*\n!*.xml" > .git/info/sparse-checkout', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.safetensors', 'text_encoder/model.safetensors', '1/4')
+         dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.safetensors', 'text_encoder_2/model.safetensors', '2/4')
+         dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae/diffusion_pytorch_model.safetensors', 'vae/diffusion_pytorch_model.safetensors', '3/4')
+         dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.safetensors', 'unet/diffusion_pytorch_model.safetensors', '4/4')
+         call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         os.chdir('/workspace')
+         clear_output()
+         while not os.path.exists('/workspace/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
+             print('Invalid HF token, make sure you have access to the model')
+             time.sleep(8)
+         print('Using SDXL model')
+     else:
+         print('Using SDXL model')
+
+     call("sed -i 's@\"force_upcast.*@@' /workspace/stable-diffusion-XL/vae/config.json", shell=True)
+
+
+ def downloadmodel_hfxl(Path_to_HuggingFace):
+
+     os.chdir('/workspace')
+     if os.path.exists('stable-diffusion-custom'):
+         call("rm -r stable-diffusion-custom", shell=True)
+     clear_output()
+
+     if os.path.exists('Fast-Dreambooth/token.txt'):
+         with open("Fast-Dreambooth/token.txt") as f:
+             token = f.read()
+         authe=f'https://USER:{token}@'
+     else:
+         authe="https://"
+
+     clear_output()
+     call("mkdir stable-diffusion-custom", shell=True)
+     os.chdir("stable-diffusion-custom")
+     call("git init", shell=True)
+     call("git lfs install --system --skip-repo", shell=True)
+     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
+     call("git config core.sparsecheckout true", shell=True)
+     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.fp16.safetensors" > .git/info/sparse-checkout', shell=True)
+     call("git pull origin main", shell=True)
+     if os.path.exists('unet/diffusion_pytorch_model.safetensors'):
+         call("rm -r .git", shell=True)
+         os.chdir('/workspace')
+         clear_output()
+         done()
+     while not os.path.exists('/workspace/stable-diffusion-custom/unet/diffusion_pytorch_model.safetensors'):
+         print('Check the link you provided')
+         os.chdir('/workspace')
+         time.sleep(5)
+
+
+ def downloadmodel_link_xl(MODEL_LINK):
+
+     import wget
+     import gdown
+     from gdown.download import get_url_from_gdrive_confirmation
+
+     def getsrc(url):
+         parsed_url = urlparse(url)
+         if parsed_url.netloc == 'civitai.com':
+             src='civitai'
+         elif parsed_url.netloc == 'drive.google.com':
+             src='gdrive'
+         elif parsed_url.netloc == 'huggingface.co':
+             src='huggingface'
+         else:
+             src='others'
+         return src
+
+     src=getsrc(MODEL_LINK)
+
+     def get_name(url, gdrive):
+         if not gdrive:
+             response = requests.get(url, allow_redirects=False)
+             if "Location" in response.headers:
+                 redirected_url = response.headers["Location"]
+                 quer = parse_qs(urlparse(redirected_url).query)
+                 if "response-content-disposition" in quer:
+                     disp_val = quer["response-content-disposition"][0].split(";")
+                     for vals in disp_val:
+                         if vals.strip().startswith("filename="):
+                             filenm=unquote(vals.split("=", 1)[1].strip())
+                             return filenm.replace("\"","")
+         else:
+             headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
+             lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
+             res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
+             res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
+             content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
+             filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
+             return filenm
+
+     if src=='civitai':
+         modelname=get_name(MODEL_LINK, False)
+     elif src=='gdrive':
+         modelname=get_name(MODEL_LINK, True)
+     else:
+         modelname=os.path.basename(MODEL_LINK)
+
+     os.chdir('/workspace')
+     if src=='huggingface':
+         dwn(MODEL_LINK, modelname,'Downloading the Model')
+     else:
+         call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelname, shell=True)
+
+     if os.path.exists(modelname):
+         if os.path.getsize(modelname) > 1810671599:
+             print('Converting to diffusers...')
+             call('python /workspace/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelname+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+
+             if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 os.chdir('/workspace')
+                 clear_output()
+                 done()
+             else:
+                 while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                     print('Conversion error')
+                     os.chdir('/workspace')
+                     time.sleep(5)
+         else:
+             while os.path.getsize(modelname) < 1810671599:
+                 print('Wrong link, check that the link is valid')
+                 os.chdir('/workspace')
+                 time.sleep(5)
+
+
+ def downloadmodel_path_xl(MODEL_PATH):
+
+     import wget
+     os.chdir('/workspace')
+     clear_output()
+     if os.path.exists(str(MODEL_PATH)):
+         print('Converting to diffusers...')
+         call('python /workspace/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+
+         if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             clear_output()
+             done()
+         while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             print('Conversion error')
+             os.chdir('/workspace')
+             time.sleep(5)
+     else:
+         while not os.path.exists(str(MODEL_PATH)):
+             print('Wrong path, use the file explorer to copy the path')
+             os.chdir('/workspace')
+             time.sleep(5)
+
+
+ def dls_xlf(Path_to_HuggingFace, MODEL_PATH, MODEL_LINK):
+
+     os.chdir('/workspace')
+
+     if Path_to_HuggingFace != "":
+         downloadmodel_hfxl(Path_to_HuggingFace)
+         MODEL_NAMExl="/workspace/stable-diffusion-custom"
+
+     elif MODEL_PATH !="":
+         downloadmodel_path_xl(MODEL_PATH)
+         MODEL_NAMExl="/workspace/stable-diffusion-custom"
+
+     elif MODEL_LINK !="":
+         downloadmodel_link_xl(MODEL_LINK)
+         MODEL_NAMExl="/workspace/stable-diffusion-custom"
+
+     else:
+         mdlvxl()
+         MODEL_NAMExl="/workspace/stable-diffusion-XL"
+
+     return MODEL_NAMExl
+
+
321
+ def sess_xl(Session_Name, MODEL_NAMExl):
322
+ import gdown
323
+ import wget
324
+ os.chdir('/workspace')
325
+ PT=""
326
+
327
+ while Session_Name=="":
328
+ print('Input the Session Name:')
329
+ Session_Name=input("")
330
+ Session_Name=Session_Name.replace(" ","_")
331
+
332
+ WORKSPACE='/workspace/Fast-Dreambooth'
333
+
334
+ INSTANCE_NAME=Session_Name
335
+ OUTPUT_DIR="/workspace/models/"+Session_Name
336
+ SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
337
+ INSTANCE_DIR=SESSION_DIR+"/instance_images"
338
+ CAPTIONS_DIR=SESSION_DIR+'/captions'
339
+ MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.safetensors')
340
+
341
+
342
+ if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
343
+ print('Loading session with no previous LoRa model')
344
+ if MODEL_NAMExl=="":
345
+ print('No model found, use the "Model Download" cell to download a model.')
346
+ else:
347
+ print('Session Loaded, proceed')
348
+
349
+ elif not os.path.exists(str(SESSION_DIR)):
350
+ call('mkdir -p '+INSTANCE_DIR, shell=True)
351
+ print('Creating session...')
352
+ if MODEL_NAMExl=="":
353
+ print('No model found, use the "Model Download" cell to download a model.')
354
+ else:
355
+ print('Session created, proceed to uploading instance images')
356
+ if MODEL_NAMExl=="":
357
+ print('No model found, use the "Model Download" cell to download a model.')
358
+
359
+ else:
360
+ print('Session Loaded, proceed')
361
+
362
+
363
+ return WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl
364
+
365
+
366
+
367
+ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR):
368
+
369
+ if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
370
+ call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
371
+
372
+ uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
373
+ Upload = widgets.Button(
374
+ description='Upload',
375
+ disabled=False,
376
+ button_style='info',
377
+ tooltip='Click to upload the chosen instance images',
378
+ icon=''
379
+ )
380
+
381
+
382
+ def up(Upload):
383
+ with out:
384
+ uploader.close()
385
+ Upload.close()
386
+ upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
387
+ done()
388
+ out=widgets.Output()
389
+
390
+ if IMAGES_FOLDER_OPTIONAL=="":
391
+ Upload.on_click(up)
392
+ display(uploader, Upload, out)
393
+ else:
394
+ upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
395
+ done()
396
+
397
+
398
+
399
+ def upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader):
400
+
401
+ from tqdm import tqdm
402
+ if Remove_existing_instance_images:
403
+ if os.path.exists(str(INSTANCE_DIR)):
404
+ call("rm -r " +INSTANCE_DIR, shell=True)
405
+ if os.path.exists(str(CAPTIONS_DIR)):
406
+ call("rm -r " +CAPTIONS_DIR, shell=True)
407
+
408
+
409
+ if not os.path.exists(str(INSTANCE_DIR)):
410
+ call("mkdir -p " +INSTANCE_DIR, shell=True)
411
+ if not os.path.exists(str(CAPTIONS_DIR)):
412
+ call("mkdir -p " +CAPTIONS_DIR, shell=True)
413
+
414
+
415
+ if IMAGES_FOLDER_OPTIONAL !="":
416
+ if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
417
+ call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)
418
+
419
+ if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
420
+ call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
421
+ if Crop_images:
422
+ os.chdir(str(IMAGES_FOLDER_OPTIONAL))
423
+ call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
424
+ os.chdir('/workspace')
425
+ for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
426
+ extension = filename.split(".")[-1]
427
+ identifier=filename.split(".")[0]
428
+ new_path_with_file = os.path.join(INSTANCE_DIR, filename)
429
+ file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
430
+ file=file.convert("RGB")
431
+ file=ImageOps.exif_transpose(file)
432
+ width, height = file.size
433
+ if file.size !=(Crop_size, Crop_size):
434
+ image=crop_image(file, Crop_size)
435
+ if extension.upper()=="JPG" or extension.upper()=="jpg":
436
+ image[0].save(new_path_with_file, format="JPEG", quality = 100)
437
+ else:
438
+ image[0].save(new_path_with_file, format=extension.upper())
439
+
440
+ else:
441
+ call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
442
+
443
+ else:
444
+ for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
445
+ call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
446
+
447
+ elif IMAGES_FOLDER_OPTIONAL =="":
448
+ up=""
449
+ for file in uploader.value:
450
+ filename = file['name']
451
+ if filename.split(".")[-1]=="txt":
452
+ with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
453
+ f.write(bytes(file['content']).decode())
454
+ up=[file for file in uploader.value if not file['name'].endswith('.txt')]
455
+ if Crop_images:
456
+ for file in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
457
+ filename = file['name']
458
+ img = Image.open(io.BytesIO(file['content']))
459
+ img=img.convert("RGB")
460
+ img=ImageOps.exif_transpose(img)
461
+ extension = filename.split(".")[-1]
462
+ identifier=filename.split(".")[0]
463
+
464
+ if extension.upper()=="JPG" or extension.upper()=="jpg":
465
+ img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
466
+ else:
467
+ img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
468
+
469
+ new_path_with_file = os.path.join(INSTANCE_DIR, filename)
470
+ file = Image.open(new_path_with_file)
471
+ width, height = file.size
472
+ if file.size !=(Crop_size, Crop_size):
473
+ image=crop_image(file, Crop_size)
474
+ if extension.upper()=="JPG" or extension.upper()=="jpg":
475
+ image[0].save(new_path_with_file, format="JPEG", quality = 100)
476
+ else:
477
+ image[0].save(new_path_with_file, format=extension.upper())
478
+
479
+ else:
480
+ for file in tqdm(uploader.value, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
481
+ filename = file['name']
482
+ img = Image.open(io.BytesIO(file['content']))
483
+ img=img.convert("RGB")
484
+ extension = filename.split(".")[-1]
485
+ identifier=filename.split(".")[0]
486
+
487
+ if extension.upper()=="JPG" or extension.upper()=="jpg":
488
+ img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
489
+ else:
490
+ img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
491
+
492
+
493
+ os.chdir(INSTANCE_DIR)
494
+ call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
495
+ os.chdir(CAPTIONS_DIR)
496
+ call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
497
+ os.chdir('/workspace')
498
+
499
+ if Resize_to_1024_and_keep_aspect_ratio and not Crop_images:
500
+ resize_keep_aspect(INSTANCE_DIR)
501
+
502
+
503
+
504
+
505
+ def caption(CAPTIONS_DIR, INSTANCE_DIR):
506
+
507
+ paths=""
508
+ out=""
509
+ widgets_l=""
510
+ clear_output()
511
+ def Caption(path):
512
+ if path!="Select an instance image to caption":
513
+
514
+ name = os.path.splitext(os.path.basename(path))[0]
515
+ ext=os.path.splitext(os.path.basename(path))[-1][1:]
516
+ if ext=="jpg" or "JPG":
517
+ ext="JPEG"
518
+
519
+ if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
520
+ with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
521
+ text = f.read()
522
+ else:
523
+ with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
524
+ f.write("")
525
+ with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
526
+ text = f.read()
527
+
528
+ img=Image.open(os.path.join(INSTANCE_DIR,path))
529
+ img=img.convert("RGB")
530
+ img=img.resize((420, 420))
531
+ image_bytes = BytesIO()
532
+ img.save(image_bytes, format=ext, qualiy=10)
533
+ image_bytes.seek(0)
534
+ image_data = image_bytes.read()
535
+ img= image_data
536
+ image = widgets.Image(
537
+ value=img,
538
+ width=420,
539
+ height=420
540
+ )
541
+ text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
542
+
543
+
544
+ def update_text(text):
545
+ with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
546
+ f.write(text)
547
+
548
+ button = widgets.Button(description='Save', button_style='success')
549
+ button.on_click(lambda b: update_text(text_area.value))
550
+
551
+ return widgets.VBox([widgets.HBox([image, text_area, button])])
552
+
553
+
554
+ paths = os.listdir(INSTANCE_DIR)
555
+ widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
556
+
557
+
558
+ out = widgets.Output()
559
+
560
+ def click(change):
561
+ with out:
562
+ out.clear_output()
563
+ display(Caption(change.new))
564
+
565
+ widgets_l.observe(click, names='value')
566
+ display(widgets.HBox([widgets_l, out]))
567
+
568
+
569
+
570
+ def dbtrainxl(Unet_Training_Epochs, Text_Encoder_Training_Epochs, Unet_Learning_Rate, Text_Encoder_Learning_Rate, dim, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, ofstnselvl, Save_VRAM, Intermediary_Save_Epoch):
571
+
572
+
573
+ if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
574
+ call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
575
+ if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
576
+ call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)
577
+
578
+
579
+ Seed=random.randint(1, 999999)
580
+
581
+ ofstnse=""
582
+ if Offset_Noise:
583
+ ofstnse="--offset_noise"
584
+
585
+ GC=''
586
+ if Save_VRAM:
587
+ GC='--gradient_checkpointing'
588
+
589
+ extrnlcptn=""
590
+ if External_Captions:
591
+ extrnlcptn="--external_captions"
592
+
593
+ precision="fp16"
594
+
595
+
596
+
597
+ def train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
598
+ print('Training the Text Encoder...')
599
+ call('accelerate launch /workspace/diffusers/examples/dreambooth/train_dreambooth_sdxl_TI.py \
600
+ '+ofstnse+' \
601
+ '+extrnlcptn+' \
602
+ --dim='+str(dim)+' \
603
+ --ofstnselvl='+str(ofstnselvl)+' \
604
+ --image_captions_filename \
605
+ --Session_dir='+SESSION_DIR+' \
606
+ --pretrained_model_name_or_path='+MODEL_NAME+' \
607
+ --instance_data_dir='+INSTANCE_DIR+' \
608
+ --output_dir='+OUTPUT_DIR+' \
609
+ --captions_dir='+CAPTIONS_DIR+' \
610
+ --seed='+str(Seed)+' \
611
+ --resolution='+str(Resolution)+' \
612
+ --mixed_precision='+str(precision)+' \
613
+ --train_batch_size=1 \
614
+ --gradient_accumulation_steps=1 '+GC+ ' \
615
+ --use_8bit_adam \
616
+ --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
617
+ --lr_scheduler="cosine" \
618
+ --lr_warmup_steps=0 \
619
+ --num_train_epochs='+str(Training_Epochs), shell=True)
620
+
621
+
622
+
623
+ def train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
624
+ print('Training the UNet...')
625
+ call('accelerate launch /workspace/diffusers/examples/dreambooth/train_dreambooth_sdxl_lora.py \
626
+ '+ofstnse+' \
627
+ '+extrnlcptn+' \
628
+ --saves='+Intermediary_Save_Epoch+' \
629
+ --dim='+str(dim)+' \
630
+ --ofstnselvl='+str(ofstnselvl)+' \
631
+ --image_captions_filename \
632
+ --Session_dir='+SESSION_DIR+' \
633
+ --pretrained_model_name_or_path='+MODEL_NAME+' \
634
+ --instance_data_dir='+INSTANCE_DIR+' \
635
+ --output_dir='+OUTPUT_DIR+' \
636
+ --captions_dir='+CAPTIONS_DIR+' \
637
+ --seed='+str(Seed)+' \
638
+ --resolution='+str(Resolution)+' \
639
+ --mixed_precision='+str(precision)+' \
640
+ --train_batch_size=1 \
641
+ --gradient_accumulation_steps=1 '+GC+ ' \
642
+ --use_8bit_adam \
643
+ --learning_rate='+str(Unet_Learning_Rate)+' \
644
+ --lr_scheduler="cosine" \
645
+ --lr_warmup_steps=0 \
646
+ --num_train_epochs='+str(Training_Epochs), shell=True)
647
+
648
+
649
+
650
+ if Unet_Training_Epochs!=0:
651
+ if Text_Encoder_Training_Epochs!=0:
652
+ train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Text_Encoder_Training_Epochs)
653
+ clear_output()
654
+ train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Unet_Training_Epochs)
655
+ else :
656
+ print('Nothing to do')
657
+
658
+
659
+ if os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
660
+ clear_output()
661
+ print("DONE, the LoRa model is in the session's folder")
662
+ else:
663
+ print("Something went wrong")
664
+
665
+
666
+
667
+
668
+ def sdcmff(Huggingface_token_optional, MDLPTH, restored):
669
+
670
+ from slugify import slugify
671
+ from huggingface_hub import HfApi, CommitOperationAdd, create_repo
672
+
673
+ os.chdir('/workspace')
674
+
675
+ if restored:
676
+ Huggingface_token_optional=""
677
+
678
+ if Huggingface_token_optional!="":
679
+ username = HfApi().whoami(Huggingface_token_optional)["name"]
680
+ backup=f"https://huggingface.co/datasets/{username}/fast-stable-diffusion/resolve/main/sdcomfy_backup_rnpd.tar.zst"
681
+ headers = {"Authorization": f"Bearer {Huggingface_token_optional}"}
682
+ response = requests.head(backup, headers=headers)
683
+ if response.status_code == 302:
684
+ restored=True
685
+ print('Restoring ComfyUI...')
686
+ open('/workspace/sdcomfy_backup_rnpd.tar.zst', 'wb').write(requests.get(backup, headers=headers).content)
687
+ call('tar --zstd -xf sdcomfy_backup_rnpd.tar.zst', shell=True)
688
+ call('rm sdcomfy_backup_rnpd.tar.zst', shell=True)
689
+ else:
690
+ print('Backup not found, using a fresh/existing repo...')
691
+ time.sleep(2)
692
+ if not os.path.exists('ComfyUI'):
693
+ call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)
694
+ else:
695
+ print('Installing/Updating the repo...')
696
+ if not os.path.exists('ComfyUI'):
697
+ call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)
698
+
699
+ os.chdir('ComfyUI')
700
+ call('git reset --hard', shell=True)
701
+ print('')
702
+ call('git pull', shell=True)
703
+
704
+ if os.path.exists(MDLPTH):
705
+ call('ln -s '+os.path.dirname(MDLPTH)+' models/loras', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
706
+
707
+ clean_symlinks('models/loras')
708
+
709
+ if not os.path.exists('models/checkpoints/sd_xl_base_1.0.safetensors'):
710
+ call('ln -s /workspace/auto-models/* models/checkpoints', shell=True)
711
+
712
+
713
+ podid=os.environ.get('RUNPOD_POD_ID')
714
+ localurl=f"https://{podid}-3001.proxy.runpod.net"
715
+ call("sed -i 's@print(\"To see the GUI go to: http://{}:{}\".format(address, port))@print(\"\u2714 Connected\")\\n print(\""+localurl+"\")@' /workspace/ComfyUI/server.py", shell=True)
716
+ os.chdir('/workspace')
717
+
718
+ return restored
719
+
720
+
721
+
722
+
723
+ def test(MDLPTH, User, Password, Huggingface_token_optional, restoreda):
724
+
725
+ from slugify import slugify
726
+ from huggingface_hub import HfApi, CommitOperationAdd, create_repo
727
+ import gradio
728
+
729
+ gradio.close_all()
730
+
731
+
732
+ auth=f"--gradio-auth {User}:{Password}"
733
+ if User =="" or Password=="":
734
+ auth=""
735
+
736
+
737
+ if restoreda:
738
+ Huggingface_token_optional=""
739
+
740
+ if Huggingface_token_optional!="":
741
+ username = HfApi().whoami(Huggingface_token_optional)["name"]
742
+ backup=f"https://huggingface.co/datasets/{username}/fast-stable-diffusion/resolve/main/sd_backup_rnpd.tar.zst"
743
+ headers = {"Authorization": f"Bearer {Huggingface_token_optional}"}
744
+ response = requests.head(backup, headers=headers)
745
+ if response.status_code == 302:
746
+ restoreda=True
747
+ print('Restoring the SD folder...')
748
+ open('/workspace/sd_backup_rnpd.tar.zst', 'wb').write(requests.get(backup, headers=headers).content)
749
+ call('tar --zstd -xf sd_backup_rnpd.tar.zst', shell=True)
750
+ call('rm sd_backup_rnpd.tar.zst', shell=True)
751
+ else:
752
+ print('Backup not found, using a fresh/existing repo...')
753
+ time.sleep(2)
754
+ if not os.path.exists('/workspace/sd/stablediffusiond'): #reset later
755
+ call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
756
+ call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
757
+ call('rm sd_mrep.tar.zst', shell=True)
758
+ os.chdir('/workspace/sd')
759
+ if not os.path.exists('stable-diffusion-webui'):
760
+ call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
761
+
762
+ else:
763
+ print('Installing/Updating the repo...')
764
+ os.chdir('/workspace')
765
+ if not os.path.exists('/workspace/sd/stablediffusiond'): #reset later
766
+ call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
767
+ call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
768
+ call('rm sd_mrep.tar.zst', shell=True)
769
+
770
+ os.chdir('/workspace/sd')
771
+ if not os.path.exists('stable-diffusion-webui'):
772
+ call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
773
+
774
+
775
+ os.chdir('/workspace/sd/stable-diffusion-webui/')
776
+ call('git reset --hard', shell=True)
777
+ print('')
778
+ call('git pull', shell=True)
779
+
780
+
781
+ if os.path.exists(MDLPTH):
782
+ call('mkdir models/Lora', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
783
+ call('ln -s '+os.path.dirname(MDLPTH)+' models/Lora', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
784
+
785
+ if not os.path.exists('models/Stable-diffusion/sd_xl_base_1.0.safetensors'):
786
+ call('ln -s /workspace/auto-models/* models/Stable-diffusion', shell=True)
787
+
788
+ clean_symlinks('models/Lora')
789
+
790
+ os.chdir('/workspace')
791
+
792
+
793
+ call('wget -q -O /usr/local/lib/python3.10/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
794
+
795
+ os.chdir('/workspace/sd/stable-diffusion-webui/modules')
796
+
797
+ call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/workspace/sd/stablediffusion\"]@' /workspace/sd/stable-diffusion-webui/modules/paths.py", shell=True)
798
+ call("sed -i 's@\.\.\/@src/@g' /workspace/sd/stable-diffusion-webui/modules/paths.py", shell=True)
799
+ call("sed -i 's@src\/generative-models@generative-models@g' /workspace/sd/stable-diffusion-webui/modules/paths.py", shell=True)
800
+
801
+ call("sed -i 's@\[\"sd_model_checkpoint\"\]@\[\"sd_model_checkpoint\", \"sd_vae\", \"CLIP_stop_at_last_layers\", \"inpainting_mask_weight\", \"initial_noise_multiplier\"\]@g' /workspace/sd/stable-diffusion-webui/modules/shared.py", shell=True)
802
+ call("sed -i 's@print(\"No module.*@@' /workspace/sd/stablediffusion/ldm/modules/diffusionmodules/model.py", shell=True)
803
+ os.chdir('/workspace/sd/stable-diffusion-webui')
804
+ clear_output()
805
+
806
+ podid=os.environ.get('RUNPOD_POD_ID')
807
+ localurl=f"{podid}-3001.proxy.runpod.net"
808
+
809
+ for line in fileinput.input('/usr/local/lib/python3.10/dist-packages/gradio/blocks.py', inplace=True):
810
+ if line.strip().startswith('self.server_name ='):
811
+ line = f' self.server_name = "{localurl}"\n'
812
+ if line.strip().startswith('self.protocol = "https"'):
813
+ line = ' self.protocol = "https"\n'
814
+ if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
815
+ line = ''
816
+ if line.strip().startswith('else "http"'):
817
+ line = ''
818
+ sys.stdout.write(line)
819
+
820
+
821
+ configf="--disable-console-progressbars --upcast-sampling --no-half-vae --disable-safe-unpickle --api --opt-sdp-attention --enable-insecure-extension-access --no-download-sd-model --skip-version-check --listen --port 3000 --ckpt /workspace/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors "+auth
822
+
823
+
824
+ return configf, restoreda
825
+
826
+
827
+
828
+
829
+ def clean():
830
+
831
+ Sessions=os.listdir("/workspace/Fast-Dreambooth/Sessions")
832
+
833
+ s = widgets.Select(
834
+ options=Sessions,
835
+ rows=5,
836
+ description='',
837
+ disabled=False
838
+ )
839
+
840
+ out=widgets.Output()
841
+
842
+ d = widgets.Button(
843
+ description='Remove',
844
+ disabled=False,
845
+ button_style='warning',
846
+ tooltip='Removet the selected session',
847
+ icon='warning'
848
+ )
+
+     def rem(d):
+         with out:
+             if s.value is not None:
+                 clear_output()
+                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
+                 call('rm -r /workspace/Fast-Dreambooth/Sessions/'+s.value, shell=True)
+                 if os.path.exists('/workspace/models/'+s.value):
+                     call('rm -r /workspace/models/'+s.value, shell=True)
+                 s.options=os.listdir("/workspace/Fast-Dreambooth/Sessions")
+
+             else:
+                 d.close()
+                 s.close()
+                 clear_output()
+                 print("NOTHING TO REMOVE")
+
+     d.on_click(rem)
+     if s.value is not None:
+         display(s,d,out)
+     else:
+         print("NOTHING TO REMOVE")
+
+
+ def crop_image(im, size):
+
+     import cv2
+
+     GREEN = "#0F0"
+     BLUE = "#00F"
+     RED = "#F00"
+
+     def focal_point(im, settings):
+         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
+         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
+         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
+
+         pois = []
+
+         weight_pref_total = 0
+         if len(corner_points) > 0:
+             weight_pref_total += settings.corner_points_weight
+         if len(entropy_points) > 0:
+             weight_pref_total += settings.entropy_points_weight
+         if len(face_points) > 0:
+             weight_pref_total += settings.face_points_weight
+
+         corner_centroid = None
+         if len(corner_points) > 0:
+             corner_centroid = centroid(corner_points)
+             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+             pois.append(corner_centroid)
+
+         entropy_centroid = None
+         if len(entropy_points) > 0:
+             entropy_centroid = centroid(entropy_points)
+             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+             pois.append(entropy_centroid)
+
+         face_centroid = None
+         if len(face_points) > 0:
+             face_centroid = centroid(face_points)
+             face_centroid.weight = settings.face_points_weight / weight_pref_total
+             pois.append(face_centroid)
+
+         average_point = poi_average(pois, settings)
+
+         return average_point
+
+     def image_face_points(im, settings):
+
+         np_im = np.array(im)
+         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+         tries = [
+             [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+         ]
+         for t in tries:
+             classifier = cv2.CascadeClassifier(t[0])
+             minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
+             try:
+                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+                                                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+             except:
+                 continue
+
+             if len(faces) > 0:
+                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+                 return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+         return []
+
+     def image_corner_points(im, settings):
+         grayscale = im.convert("L")
+
+         # naive attempt at preventing focal points from collecting at watermarks near the bottom
+         gd = ImageDraw.Draw(grayscale)
+         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+
+         np_im = np.array(grayscale)
+
+         points = cv2.goodFeaturesToTrack(
+             np_im,
+             maxCorners=100,
+             qualityLevel=0.04,
+             minDistance=min(grayscale.width, grayscale.height)*0.06,
+             useHarrisDetector=False,
+         )
+
+         if points is None:
+             return []
+
+         focal_points = []
+         for point in points:
+             x, y = point.ravel()
+             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+
+         return focal_points
+
+     def image_entropy_points(im, settings):
+         landscape = im.height < im.width
+         portrait = im.height > im.width
+         if landscape:
+             move_idx = [0, 2]
+             move_max = im.size[0]
+         elif portrait:
+             move_idx = [1, 3]
+             move_max = im.size[1]
+         else:
+             return []
+
+         e_max = 0
+         crop_current = [0, 0, settings.crop_width, settings.crop_height]
+         crop_best = crop_current
+         while crop_current[move_idx[1]] < move_max:
+             crop = im.crop(tuple(crop_current))
+             e = image_entropy(crop)
+
+             if (e > e_max):
+                 e_max = e
+                 crop_best = list(crop_current)
+
+             crop_current[move_idx[0]] += 4
+             crop_current[move_idx[1]] += 4
+
+         x_mid = int(crop_best[0] + settings.crop_width/2)
+         y_mid = int(crop_best[1] + settings.crop_height/2)
+
+         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
+
+     def image_entropy(im):
+         # greyscale image entropy
+         # band = np.asarray(im.convert("L"))
+         band = np.asarray(im.convert("1"), dtype=np.uint8)
+         hist, _ = np.histogram(band, bins=range(0, 256))
+         hist = hist[hist > 0]
+         return -np.log2(hist / hist.sum()).sum()
+
+     def centroid(pois):
+         x = [poi.x for poi in pois]
+         y = [poi.y for poi in pois]
+         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
+
+     def poi_average(pois, settings):
+         weight = 0.0
+         x = 0.0
+         y = 0.0
+         for poi in pois:
+             weight += poi.weight
+             x += poi.x * poi.weight
+             y += poi.y * poi.weight
+         avg_x = round(weight and x / weight)
+         avg_y = round(weight and y / weight)
+
+         return PointOfInterest(avg_x, avg_y)
+
+     def is_landscape(w, h):
+         return w > h
+
+     def is_portrait(w, h):
+         return h > w
+
+     def is_square(w, h):
+         return w == h
+
+     class PointOfInterest:
+         def __init__(self, x, y, weight=1.0, size=10):
+             self.x = x
+             self.y = y
+             self.weight = weight
+             self.size = size
+
+         def bounding(self, size):
+             return [
+                 self.x - size//2,
+                 self.y - size//2,
+                 self.x + size//2,
+                 self.y + size//2
+             ]
+
+     class Settings:
+         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
+             self.crop_width = crop_width
+             self.crop_height = crop_height
+             self.corner_points_weight = corner_points_weight
+             self.entropy_points_weight = entropy_points_weight
+             self.face_points_weight = face_points_weight
+
+     settings = Settings(
+         crop_width = size,
+         crop_height = size,
+         face_points_weight = 0.9,
+         entropy_points_weight = 0.15,
+         corner_points_weight = 0.5,
+     )
+
+     scale_by = 1
+     if is_landscape(im.width, im.height):
+         scale_by = settings.crop_height / im.height
+     elif is_portrait(im.width, im.height):
+         scale_by = settings.crop_width / im.width
+     elif is_square(im.width, im.height):
+         if is_square(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_landscape(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_portrait(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_height / im.height
+
+     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
+     im_debug = im.copy()
+
+     focus = focal_point(im_debug, settings)
+
+     # take the focal point and turn it into crop coordinates that try to center over the focal
+     # point but then get adjusted back into the frame
+     y_half = int(settings.crop_height / 2)
+     x_half = int(settings.crop_width / 2)
+
+     x1 = focus.x - x_half
+     if x1 < 0:
+         x1 = 0
+     elif x1 + settings.crop_width > im.width:
+         x1 = im.width - settings.crop_width
+
+     y1 = focus.y - y_half
+     if y1 < 0:
+         y1 = 0
+     elif y1 + settings.crop_height > im.height:
+         y1 = im.height - settings.crop_height
+
+     x2 = x1 + settings.crop_width
+     y2 = y1 + settings.crop_height
+
+     crop = [x1, y1, x2, y2]
+
+     results = []
+
+     results.append(im.crop(tuple(crop)))
+
+     return results
+
+
+ def resize_keep_aspect(DIR):
+
+     import cv2
+
+     min_dimension=1024
+
+     for filename in os.listdir(DIR):
+         if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
+             image = cv2.imread(os.path.join(DIR, filename))
+
+             org_height, org_width = image.shape[0], image.shape[1]
+
+             if org_width < org_height:
+                 new_width = min_dimension
+                 new_height = int(org_height * (min_dimension / org_width))
+             else:
+                 new_height = min_dimension
+                 new_width = int(org_width * (min_dimension / org_height))
+
+             resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)
+
+             cv2.imwrite(os.path.join(DIR, filename), resized_image, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
+
+
+ def clean_symlinks(path):
+     for item in os.listdir(path):
+         lnk = os.path.join(path, item)
+         if os.path.islink(lnk) and not os.path.exists(os.readlink(lnk)):
+             os.remove(lnk)
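An aside on the autocrop logic above: focal_point() blends up to three detectors (corner features, an entropy scan, and Haar-cascade faces) into one focal point, weighting each detector's centroid by its configured weight divided by weight_pref_total, and poi_average() then takes the weighted mean. Below is a minimal standalone sketch of that weighting step; the Poi class and the sample coordinates are hypothetical stand-ins, not part of this commit.

# Condensed version of the poi_average() weighting above; illustrative only.
class Poi:
    def __init__(self, x, y, weight):
        self.x, self.y, self.weight = x, y, weight

def weighted_focus(pois):
    total = sum(p.weight for p in pois)
    if total == 0:  # mirrors the `weight and x / weight` zero-guard above
        return (0, 0)
    x = sum(p.x * p.weight for p in pois) / total
    y = sum(p.y * p.weight for p in pois) / total
    return (round(x), round(y))

# With the Settings used above (face 0.9, corner 0.5, entropy 0.15), a detected
# face pulls the focus strongly toward its centroid:
print(weighted_focus([Poi(100, 80, 0.9), Poi(220, 140, 0.5), Poi(300, 200, 0.15)]))  # -> (158, 111)

Note that the code above normalizes the weights by weight_pref_total first; since a weighted mean is unchanged when all weights are scaled by the same constant, the sketch skips that step.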
Scripts/runpodSD.py ADDED
@@ -0,0 +1,499 @@
+ import os
+ from IPython.display import clear_output
+ from subprocess import call, getoutput, Popen, run
+ import time
+ import ipywidgets as widgets
+ import requests
+ import sys
+ import fileinput
+ from torch.hub import download_url_to_file
+ from urllib.parse import urlparse, parse_qs, unquote
+ import re
+ import six
+
+ from urllib.request import urlopen, Request
+ import tempfile
+ from tqdm import tqdm
+
+
+ def Deps(force_reinstall):
+
+     if not force_reinstall and os.path.exists('/usr/local/lib/python3.10/dist-packages/safetensors'):
+         ntbks()
+         print('Modules and notebooks updated, dependencies already installed')
+         os.environ['TORCH_HOME'] = '/workspace/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+     else:
+         call('pip install --root-user-action=ignore --disable-pip-version-check --no-deps -qq gdown PyWavelets numpy==1.23.5 accelerate==0.12.0 --force-reinstall', shell=True, stdout=open('/dev/null', 'w'))
+         ntbks()
+         if os.path.exists('deps'):
+             call("rm -r deps", shell=True)
+         if os.path.exists('diffusers'):
+             call("rm -r diffusers", shell=True)
+         call('mkdir deps', shell=True)
+         if not os.path.exists('cache'):
+             call('mkdir cache', shell=True)
+         os.chdir('deps')
+         dwn("https://huggingface.co/TheLastBen/dependencies/resolve/main/rnpddeps-t2.tar.zst", "/workspace/deps/rnpddeps-t2.tar.zst", "Installing dependencies")
+         call('tar -C / --zstd -xf rnpddeps-t2.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         call("sed -i 's@~/.cache@/workspace/cache@' /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", shell=True)
+         os.chdir('/workspace')
+         call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers", shell=True, stdout=open('/dev/null', 'w'))
+         call('pip install --root-user-action=ignore --disable-pip-version-check -qq gradio==3.41.2', shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r deps", shell=True)
+         os.chdir('/workspace')
+         os.environ['TORCH_HOME'] = '/workspace/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+         call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.10/warnings.py", shell=True)
+         clear_output()
+
+     done()
+
+
+ def dwn(url, dst, msg):
+     file_size = None
+     req = Request(url, headers={"User-Agent": "torch.hub"})
+     u = urlopen(req)
+     meta = u.info()
+     if hasattr(meta, 'getheaders'):
+         content_length = meta.getheaders("Content-Length")
+     else:
+         content_length = meta.get_all("Content-Length")
+     if content_length is not None and len(content_length) > 0:
+         file_size = int(content_length[0])
+
+     with tqdm(total=file_size, disable=False, mininterval=0.5,
+               bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
+         with open(dst, "wb") as f:
+             while True:
+                 buffer = u.read(8192)
+                 if len(buffer) == 0:
+                     break
+                 f.write(buffer)
+                 pbar.update(len(buffer))
+             f.close()
+
+
+ def ntbks():
+
+     os.chdir('/workspace')
+     if not os.path.exists('Latest_Notebooks'):
+         call('mkdir Latest_Notebooks', shell=True)
+     else:
+         call('rm -r Latest_Notebooks', shell=True)
+         call('mkdir Latest_Notebooks', shell=True)
+     os.chdir('/workspace/Latest_Notebooks')
+     call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
+     call('rm Notebooks.txt', shell=True)
+     os.chdir('/workspace')
+
+
+ def repo(Huggingface_token_optional):
+
+     from slugify import slugify
+     from huggingface_hub import HfApi, CommitOperationAdd, create_repo
+
+     os.chdir('/workspace')
+     if Huggingface_token_optional!="":
+         username = HfApi().whoami(Huggingface_token_optional)["name"]
+         backup=f"https://huggingface.co/datasets/{username}/fast-stable-diffusion/resolve/main/sd_backup_rnpd.tar.zst"
+         headers = {"Authorization": f"Bearer {Huggingface_token_optional}"}
+         response = requests.head(backup, headers=headers)
+         if response.status_code == 302:
+             print('Restoring the SD folder...')
+             open('/workspace/sd_backup_rnpd.tar.zst', 'wb').write(requests.get(backup, headers=headers).content)
+             call('tar --zstd -xf sd_backup_rnpd.tar.zst', shell=True)
+             call('rm sd_backup_rnpd.tar.zst', shell=True)
+         else:
+             print('Backup not found, using a fresh/existing repo...')
+             time.sleep(2)
+             if not os.path.exists('/workspace/sd/stablediffusiond'):  # reset later
+                 call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
+                 call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
+                 call('rm sd_mrep.tar.zst', shell=True)
+             os.chdir('/workspace/sd')
+             if not os.path.exists('SD'):
+                 call('git clone -q --depth 1 --branch main https://github.com/apollo812/SD', shell=True)
+
+     else:
+         print('Installing/Updating the repo...')
+         os.chdir('/workspace')
+         if not os.path.exists('/workspace/sd/stablediffusiond'):  # reset later
+             call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
+             call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
+             call('rm sd_mrep.tar.zst', shell=True)
+
+         os.chdir('/workspace/sd')
+         if not os.path.exists('SD'):
+             call('git clone -q --depth 1 --branch main https://github.com/apollo812/SD', shell=True)
+
+     os.chdir('/workspace/sd/SD/')
+     call('git reset --hard', shell=True)
+     print('')
+     call('git pull', shell=True)
+     os.chdir('/workspace')
+     clear_output()
+     done()
+
+
+ def mdl(Original_Model_Version, Path_to_MODEL, MODEL_LINK):
+
+     import gdown
+
+     src=getsrc(MODEL_LINK)
+
+     if not os.path.exists('/workspace/sd/SD/models/Stable-diffusion/SDv1-5.ckpt'):
+         call('ln -s /workspace/auto-models/* /workspace/sd/SD/models/Stable-diffusion', shell=True)
+
+     if Path_to_MODEL !='':
+         if os.path.exists(str(Path_to_MODEL)):
+             print('Using the custom model')
+             model=Path_to_MODEL
+         else:
+             print('Wrong path, check that the path to the model is correct')
+
+     elif MODEL_LINK !="":
+
+         if src=='civitai':
+             modelname=get_name(MODEL_LINK, False)
+             model=f'/workspace/sd/SD/models/Stable-diffusion/{modelname}'
+             if not os.path.exists(model):
+                 dwn(MODEL_LINK, model, 'Downloading the custom model')
+                 clear_output()
+             else:
+                 print('Model already exists')
+         elif src=='gdrive':
+             modelname=get_name(MODEL_LINK, True)
+             model=f'/workspace/sd/SD/models/Stable-diffusion/{modelname}'
+             if not os.path.exists(model):
+                 gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
+                 clear_output()
+             else:
+                 print('Model already exists')
+         else:
+             modelname=os.path.basename(MODEL_LINK)
+             model=f'/workspace/sd/SD/models/Stable-diffusion/{modelname}'
+             if not os.path.exists(model):
+                 gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
+                 clear_output()
+             else:
+                 print('Model already exists')
+
+         if os.path.exists(model) and os.path.getsize(model) > 1810671599:
+             print('Model downloaded, using the custom model.')
+         else:
+             call('rm '+model, shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+             print('Wrong link, check that the link is valid')
+
+     else:
+         if Original_Model_Version == "v1.5":
+             model="/workspace/sd/SD/models/Stable-diffusion/SDv1-5.ckpt"
+             print('Using the original V1.5 model')
+         elif Original_Model_Version == "v2-512":
+             model='/workspace/sd/SD/models/Stable-diffusion/SDv2-512.ckpt'
+             if not os.path.exists('/workspace/sd/SD/models/Stable-diffusion/SDv2-512.ckpt'):
+                 print('Downloading the V2-512 model...')
+                 call('gdown -O '+model+' https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-nonema-pruned.ckpt', shell=True)
+                 clear_output()
+             print('Using the original V2-512 model')
+         elif Original_Model_Version == "v2-768":
+             model="/workspace/sd/SD/models/Stable-diffusion/SDv2-768.ckpt"
+             print('Using the original V2-768 model')
+         elif Original_Model_Version == "SDXL":
+             model="/workspace/sd/SD/models/Stable-diffusion/sd_xl_base_1.0.safetensors"
+             print('Using the original SDXL model')
+         else:
+             model="/workspace/sd/SD/models/Stable-diffusion"
+             print('Wrong model version, try again')
+
+     try:
+         model
+     except NameError:  # no branch above set a model, fall back to the models folder
+         model="/workspace/sd/SD/models/Stable-diffusion"
+
+     return model
+
+
+ def loradwn(LoRA_LINK):
+
+     import gdown  # needed for the gdrive/fallback branches below; missing in the original
+
+     if LoRA_LINK=='':
+         print('Nothing to do')
+     else:
+         os.makedirs('/workspace/sd/SD/models/Lora', exist_ok=True)
+
+         src=getsrc(LoRA_LINK)
+
+         if src=='civitai':
+             modelname=get_name(LoRA_LINK, False)
+             loramodel=f'/workspace/sd/SD/models/Lora/{modelname}'
+             if not os.path.exists(loramodel):
+                 dwn(LoRA_LINK, loramodel, 'Downloading the LoRA model')
+                 clear_output()
+             else:
+                 print('Model already exists')
+         elif src=='gdrive':
+             modelname=get_name(LoRA_LINK, True)
+             loramodel=f'/workspace/sd/SD/models/Lora/{modelname}'
+             if not os.path.exists(loramodel):
+                 gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
+                 clear_output()
+             else:
+                 print('Model already exists')
+         else:
+             modelname=os.path.basename(LoRA_LINK)
+             loramodel=f'/workspace/sd/SD/models/Lora/{modelname}'
+             if not os.path.exists(loramodel):
+                 gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
+                 clear_output()
+             else:
+                 print('Model already exists')
+
+         if os.path.exists(loramodel):
+             print('LoRA downloaded')
+         else:
+             print('Wrong link, check that the link is valid')
+
+
+ def CNet(ControlNet_Model, ControlNet_XL_Model):
+
+     def download(url, model_dir):
+
+         filename = os.path.basename(urlparse(url).path)
+         pth = os.path.abspath(os.path.join(model_dir, filename))
+         if not os.path.exists(pth):
+             print('Downloading: '+os.path.basename(url))
+             download_url_to_file(url, pth, hash_prefix=None, progress=True)
+         else:
+             print(f"The model {filename} already exists")
+
+     wrngv1=False
+     os.chdir('/workspace/sd/SD/extensions')
+     if not os.path.exists("sd-webui-controlnet"):
+         call('git clone https://github.com/Mikubill/sd-webui-controlnet.git', shell=True)
+         os.chdir('/workspace')
+     else:
+         os.chdir('sd-webui-controlnet')
+         call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         call('git pull', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         os.chdir('/workspace')
+
+     mdldir="/workspace/sd/SD/extensions/sd-webui-controlnet/models"
+     for filename in os.listdir(mdldir):
+         if "_sd14v1" in filename:
+             renamed = re.sub("_sd14v1", "-fp16", filename)
+             os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))
+
+     call('wget -q -O CN_models.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models.txt', shell=True)
+     call('wget -q -O CN_models_XL.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_XL.txt', shell=True)
+
+     with open("CN_models.txt", 'r') as f:
+         mdllnk = f.read().splitlines()
+     with open("CN_models_XL.txt", 'r') as d:
+         mdllnk_XL = d.read().splitlines()
+     call('rm CN_models.txt CN_models_XL.txt', shell=True)
+
+     os.chdir('/workspace')
+
+     if ControlNet_Model == "All" or ControlNet_Model == "all":
+         for lnk in mdllnk:
+             download(lnk, mdldir)
+         clear_output()
+
+     elif ControlNet_Model == "15":
+         mdllnk=list(filter(lambda x: 't2i' in x, mdllnk))
+         for lnk in mdllnk:
+             download(lnk, mdldir)
+         clear_output()
+
+     elif ControlNet_Model.isdigit() and int(ControlNet_Model)-1<14 and int(ControlNet_Model)>0:
+         download(mdllnk[int(ControlNet_Model)-1], mdldir)
+         clear_output()
+
+     elif ControlNet_Model == "none":
+         clear_output()
+
+     else:
+         print('Wrong ControlNet V1 choice, try again')
+         wrngv1=True
+
+     if ControlNet_XL_Model == "All" or ControlNet_XL_Model == "all":
+         for lnk_XL in mdllnk_XL:
+             download(lnk_XL, mdldir)
+         if not wrngv1:
+             clear_output()
+             done()
+
+     elif ControlNet_XL_Model.isdigit() and int(ControlNet_XL_Model)-1<5:
+         download(mdllnk_XL[int(ControlNet_XL_Model)-1], mdldir)
+         if not wrngv1:
+             clear_output()
+             done()
+
+     elif ControlNet_XL_Model == "none":
+         if not wrngv1:
+             clear_output()
+             done()
+
+     else:
+         print('Wrong ControlNet XL choice, try again')
+
+
+ def sd(User, Password, model):
+
+     import gradio
+
+     gradio.close_all()
+
+     auth=f"--gradio-auth {User}:{Password}"
+     if User =="" or Password=="":
+         auth=""
+
+     call('wget -q -O /usr/local/lib/python3.10/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
+
+     os.chdir('/workspace/sd/SD/modules')
+
+     call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/workspace/sd/stablediffusion\"]@' /workspace/sd/SD/modules/paths.py", shell=True)
+     call("sed -i 's@\.\.\/@src/@g' /workspace/sd/SD/modules/paths.py", shell=True)
+     call("sed -i 's@src\/generative-models@generative-models@g' /workspace/sd/SD/modules/paths.py", shell=True)
+
+     call("sed -i 's@\[\"sd_model_checkpoint\"\]@\[\"sd_model_checkpoint\", \"sd_vae\", \"CLIP_stop_at_last_layers\", \"inpainting_mask_weight\", \"initial_noise_multiplier\"\]@g' /workspace/sd/SD/modules/shared.py", shell=True)
+
+     call("sed -i 's@print(\"No module.*@@' /workspace/sd/stablediffusion/ldm/modules/diffusionmodules/model.py", shell=True)
+     os.chdir('/workspace/sd/SD')
+     clear_output()
+
+     podid=os.environ.get('RUNPOD_POD_ID')
+     localurl=f"{podid}-3001.proxy.runpod.net"
+
+     for line in fileinput.input('/usr/local/lib/python3.10/dist-packages/gradio/blocks.py', inplace=True):
+         if line.strip().startswith('self.server_name ='):
+             line = f'            self.server_name = "{localurl}"\n'
+         if line.strip().startswith('self.protocol = "https"'):
+             line = '            self.protocol = "https"\n'
+         if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
+             line = ''
+         if line.strip().startswith('else "http"'):
+             line = ''
+         sys.stdout.write(line)
+
+     if model=="":
+         mdlpth=""
+     else:
+         if os.path.isfile(model):
+             mdlpth="--ckpt "+model
+         else:
+             mdlpth="--ckpt-dir "+model
+
+     configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --opt-sdp-attention --enable-insecure-extension-access --skip-version-check --listen --port 3000 "+auth+" "+mdlpth
+
+     return configf
+
+
+ def save(Huggingface_Write_token):
+
+     from slugify import slugify
+     from huggingface_hub import HfApi, CommitOperationAdd, create_repo
+
+     if Huggingface_Write_token=="":
+         print('A Hugging Face write token is required')
+
+     else:
+         os.chdir('/workspace')
+
+         if os.path.exists('sd'):
+
+             call('tar --exclude="SD/models/*/*" --exclude="sd-webui-controlnet/models/*" --zstd -cf sd_backup_rnpd.tar.zst sd', shell=True)
+             api = HfApi()
+             username = api.whoami(token=Huggingface_Write_token)["name"]
+
+             repo_id = f"{username}/{slugify('fast-stable-diffusion')}"
+
+             print("Backing up...")
+
+             operations = [CommitOperationAdd(path_in_repo="sd_backup_rnpd.tar.zst", path_or_fileobj="/workspace/sd_backup_rnpd.tar.zst")]
+
+             create_repo(repo_id, private=True, token=Huggingface_Write_token, exist_ok=True, repo_type="dataset")
+
+             api.create_commit(
+                 repo_id=repo_id,
+                 repo_type="dataset",
+                 operations=operations,
+                 commit_message="SD folder Backup",
+                 token=Huggingface_Write_token
+             )
+
+             call('rm sd_backup_rnpd.tar.zst', shell=True)
+             clear_output()
+
+             done()
+
+         else:
+             print('Nothing to backup')
+
+
+ def getsrc(url):
+
+     parsed_url = urlparse(url)
+
+     if parsed_url.netloc == 'civitai.com':
+         src='civitai'
+     elif parsed_url.netloc == 'drive.google.com':
+         src='gdrive'
+     elif parsed_url.netloc == 'huggingface.co':
+         src='huggingface'
+     else:
+         src='others'
+     return src
+
+
+ def get_name(url, gdrive):
+
+     from gdown.download import get_url_from_gdrive_confirmation
+
+     if not gdrive:
+         response = requests.get(url, allow_redirects=False)
+         if "Location" in response.headers:
+             redirected_url = response.headers["Location"]
+             quer = parse_qs(urlparse(redirected_url).query)
+             if "response-content-disposition" in quer:
+                 disp_val = quer["response-content-disposition"][0].split(";")
+                 for vals in disp_val:
+                     if vals.strip().startswith("filename="):
+                         filenm=unquote(vals.split("=", 1)[1].strip())
+                         return filenm.replace("\"","")
+     else:
+         headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
+         lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
+         res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
+         res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
+         content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
+         filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
+         return filenm
+
+
+ def done():
+     done = widgets.Button(
+         description='Done!',
+         disabled=True,
+         button_style='success',
+         tooltip='',
+         icon='check'
+     )
+     display(done)
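For orientation, the RNPD-SD notebook calls these entry points in roughly this order. The driver below is a hedged sketch with placeholder arguments, assuming the script is importable as a module named runpodSD; it is not code from this commit:

# Hypothetical driver cell mirroring the expected call order.
import runpodSD as rnpd

rnpd.Deps(force_reinstall=False)                           # install/refresh dependencies
rnpd.repo(Huggingface_token_optional="")                   # fetch or restore the SD folder
model = rnpd.mdl("v1.5", Path_to_MODEL="", MODEL_LINK="")  # pick a checkpoint
rnpd.loradwn("")                                           # optional LoRA download
rnpd.CNet("none", "none")                                  # optional ControlNet models
configf = rnpd.sd(User="", Password="", model=model)       # build the webui launch flags

sd() only returns the argument string; the notebook appends configf to the webui launch command, and save(Huggingface_Write_token) can then be run to back the sd folder up to a private Hugging Face dataset repo.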