Spaces: Running on Zero

Cagliostro committed
Commit ab4e488
1 Parent(s): 433fc87

fix error when handling outputs

Files changed:
- app.py +62 -75
- demo.ipynb +112 -108
- utils.py +7 -4
app.py CHANGED

@@ -11,10 +11,7 @@ import logging
 from PIL import Image, PngImagePlugin
 from datetime import datetime
 from diffusers.models import AutoencoderKL
-from diffusers import (
-    StableDiffusionXLPipeline,
-    StableDiffusionXLImg2ImgPipeline
-)
+from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -31,7 +28,10 @@ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
 
-MODEL = os.getenv(
+MODEL = os.getenv(
+    "MODEL",
+    "https://huggingface.co/cagliostrolab/animagine-xl-3.1/blob/main/animagine-xl-3.1.safetensors",
+)
 
 torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
@@ -82,7 +82,7 @@ def generate(
     upscale_by: float = 1.5,
     add_quality_tags: bool = True,
     progress=gr.Progress(track_tqdm=True),
-)
+):
     generator = utils.seed_everything(seed)
 
     width, height = utils.aspect_ratio_handler(
@@ -131,6 +131,7 @@ def generate(
         }
     else:
         metadata["use_upscaler"] = None
+    metadata["model"] = DESCRIPTION
     logger.info(json.dumps(metadata, indent=4))
 
     try:
@@ -167,13 +168,17 @@ def generate(
             generator=generator,
             output_type="pil",
         ).images
-
-        if images
-
-
-
-
-
+
+        if images:
+            image_paths = [
+                utils.save_image(image, metadata, OUTPUT_DIR, IS_COLAB)
+                for image in images
+            ]
+
+            for image_path in image_paths:
+                logger.info(f"Image saved as {image_path} with metadata")
+
+        return image_paths, metadata
     except Exception as e:
         logger.exception(f"An error occurred: {e}")
         raise
@@ -251,6 +256,22 @@ with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
                 value="896 x 1152",
                 container=True,
             )
+            with gr.Group(visible=False) as custom_resolution:
+                with gr.Row():
+                    custom_width = gr.Slider(
+                        label="Width",
+                        minimum=MIN_IMAGE_SIZE,
+                        maximum=MAX_IMAGE_SIZE,
+                        step=8,
+                        value=1024,
+                    )
+                    custom_height = gr.Slider(
+                        label="Height",
+                        minimum=MIN_IMAGE_SIZE,
+                        maximum=MAX_IMAGE_SIZE,
+                        step=8,
+                        value=1024,
+                    )
             with gr.Group():
                 use_upscaler = gr.Checkbox(label="Use Upscaler", value=False)
                 with gr.Row() as upscaler_row:
@@ -270,22 +291,6 @@ with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
                        value=1.5,
                        visible=False,
                    )
-            with gr.Group(visible=False) as custom_resolution:
-                with gr.Row():
-                    custom_width = gr.Slider(
-                        label="Width",
-                        minimum=MIN_IMAGE_SIZE,
-                        maximum=MAX_IMAGE_SIZE,
-                        step=8,
-                        value=1024,
-                    )
-                    custom_height = gr.Slider(
-                        label="Height",
-                        minimum=MIN_IMAGE_SIZE,
-                        maximum=MAX_IMAGE_SIZE,
-                        step=8,
-                        value=1024,
-                    )
             with gr.Group():
                 sampler = gr.Dropdown(
                     label="Sampler",
@@ -320,6 +325,7 @@ with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
         result = gr.Gallery(
             label="Result",
             columns=1,
+            height='100%',
             preview=True,
             show_label=False
         )
@@ -347,25 +353,12 @@ with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
         api_name=False,
     )
 
-    inputs = [
-        prompt,
-        negative_prompt,
-        seed,
-        custom_width,
-        custom_height,
-        guidance_scale,
-        num_inference_steps,
-        sampler,
-        aspect_ratio_selector,
-        style_selector,
-        quality_selector,
-        use_upscaler,
-        upscaler_strength,
-        upscale_by,
-        add_quality_tags,
-    ]
-
-    prompt.submit(
+    gr.on(
+        triggers=[
+            prompt.submit,
+            negative_prompt.submit,
+            run_button.click,
+        ],
         fn=utils.randomize_seed_fn,
         inputs=[seed, randomize_seed],
         outputs=seed,
@@ -373,32 +366,26 @@ with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
         api_name=False,
     ).then(
         fn=generate,
-        inputs=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        fn=utils.randomize_seed_fn,
-        inputs=[seed, randomize_seed],
-        outputs=seed,
-        queue=False,
-        api_name=False,
-    ).then(
-        fn=generate,
-        inputs=inputs,
+        inputs=[
+            prompt,
+            negative_prompt,
+            seed,
+            custom_width,
+            custom_height,
+            guidance_scale,
+            num_inference_steps,
+            sampler,
+            aspect_ratio_selector,
+            style_selector,
+            quality_selector,
+            use_upscaler,
+            upscaler_strength,
+            upscale_by,
+            add_quality_tags,
+        ],
         outputs=[result, gr_metadata],
-        api_name=
+        api_name="run",
     )
-
+
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
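
Note: the event wiring above switches to Gradio's gr.on helper, which attaches one callback chain to several triggers instead of duplicating the prompt.submit / negative_prompt.submit / run_button.click chains. A minimal, self-contained sketch of that pattern, not part of this commit, assuming Gradio 4.x and a stand-in echo function in place of the Space's generate():

import gradio as gr

def echo(text: str) -> str:
    # Stand-in for the Space's generate() function.
    return f"prompt received: {text}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run_button = gr.Button("Generate")
    output = gr.Textbox(label="Result")

    # One gr.on call fires the same handler for every listed trigger,
    # mirroring triggers=[prompt.submit, negative_prompt.submit, run_button.click]
    # in the diff above.
    gr.on(
        triggers=[prompt.submit, run_button.click],
        fn=echo,
        inputs=prompt,
        outputs=output,
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch()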
demo.ipynb CHANGED

@@ -1,110 +1,114 @@
 {
+  "cells": [
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "id": "538a3f0c-50c1-4952-9fcc-070d365c9a0f",
+      "metadata": {
+        "scrolled": true,
+        "id": "538a3f0c-50c1-4952-9fcc-070d365c9a0f"
+      },
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "import subprocess\n",
+        "from threading import Timer\n",
+        "from queue import Queue\n",
+        "\n",
+        "def is_colab():\n",
+        "    try:\n",
+        "        import google.colab\n",
+        "        return True\n",
+        "    except ImportError:\n",
+        "        return False\n",
+        "\n",
+        "ROOT_DIR = \"/workspace/\" if not is_colab() else \"/content/\"\n",
+        "REPO_URL = \"https://huggingface.co/spaces/cagliostrolab/animagine-xl-3.1\"\n",
+        "REPO_DIR = os.path.join(ROOT_DIR, \"animagine-xl\")\n",
+        "\n",
+        "NGROK_TOKEN = \"\"\n",
+        "NGROK_SUBDOMAIN = \"\"\n",
+        "PORT = 7860\n",
+        "\n",
+        "# os.environ[\"HF_TOKEN\"] = \"\"\n",
+        "os.environ[\"IS_COLAB\"] = \"1\"\n",
+        "os.environ[\"MODEL\"] = \"https://huggingface.co/cagliostrolab/animagine-xl-3.1/blob/main/animagine-xl-3.1.safetensors\"\n",
+        "os.environ[\"CACHE_EXAMPLES\"] = \"1\"\n",
+        "\n",
+        "def clone_repository(url, directory, branch=None):\n",
+        "    subprocess.run([\"git\", \"clone\", url, directory], check=True)\n",
+        "    if branch:\n",
+        "        subprocess.run([\"git\", \"checkout\", branch], cwd=directory, check=True)\n",
+        "\n",
+        "def install_dependencies(directory):\n",
+        "    dependencies = [\"accelerate==0.27.2\", \"diffusers==0.26.3\", \"gradio==4.20.0\",\n",
+        "                    \"invisible-watermark==0.2.0\", \"spaces==0.24.0\", \"omegaconf==2.3.0\", \"timm==0.9.10\"]\n",
+        "    if is_colab():\n",
+        "        subprocess.run([\"pip\", \"install\"] + dependencies, check=True)\n",
+        "    else:\n",
+        "        requirements_path = os.path.join(directory, \"requirements.txt\")\n",
+        "        subprocess.run([\"pip\", \"install\", \"-r\", requirements_path], check=True)\n",
+        "\n",
+        "def setup_ngrok_tunnel(port, queue, auth_token, subdomain):\n",
+        "    ngrok.set_auth_token(auth_token)\n",
+        "    url = ngrok.connect(port, bind_tls=True, subdomain=subdomain)\n",
+        "    queue.put(url)\n",
+        "\n",
+        "def main():\n",
+        "    if not os.path.exists(REPO_DIR):\n",
+        "        print(f\"Cloning repository to {REPO_DIR}\")\n",
+        "        clone_repository(REPO_URL, REPO_DIR)\n",
+        "\n",
+        "    print(\"Installing required Python libraries\")\n",
+        "    install_dependencies(REPO_DIR)\n",
+        "    print(\"Done!\")\n",
+        "\n",
+        "    os.chdir(REPO_DIR)\n",
+        "\n",
+        "    if NGROK_TOKEN:\n",
+        "        try:\n",
+        "            from pyngrok import conf, ngrok\n",
+        "        except ImportError:\n",
+        "            subprocess.run([\"pip\", \"install\", \"-qqqq\", \"--upgrade\", \"setuptools\"], check=True)\n",
+        "            subprocess.run([\"pip\", \"install\", \"-qqqq\", \"-U\", \"pyngrok\"], check=True)\n",
+        "            from pyngrok import conf, ngrok\n",
+        "\n",
+        "        ngrok.kill()\n",
+        "        ngrok_output_queue = Queue()\n",
+        "        ngrok_thread = Timer(2, setup_ngrok_tunnel, args=(PORT, ngrok_output_queue, NGROK_TOKEN, NGROK_SUBDOMAIN))\n",
+        "        ngrok_thread.start()\n",
+        "        ngrok_thread.join()\n",
+        "        print(ngrok_output_queue.get())\n",
+        "\n",
+        "    !python app.py\n",
+        "\n",
+        "if __name__ == \"__main__\":\n",
+        "    main()"
+      ]
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3 (ipykernel)",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.10.12"
+    },
+    "colab": {
+      "provenance": []
+    }
   },
+  "nbformat": 4,
+  "nbformat_minor": 5
+}
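
The notebook's tunnel setup hands the public URL from a worker thread back through a Queue before launching the app. A trimmed sketch of just that piece, separate from the notebook itself, assuming pyngrok is installed, "YOUR_NGROK_TOKEN" is a placeholder token, and the subdomain option is omitted:

from queue import Queue
from threading import Timer

from pyngrok import ngrok

PORT = 7860  # same port the Space binds to

def open_tunnel(port: int, out: Queue, token: str) -> None:
    # Runs on the Timer thread; publishes the tunnel once it is up.
    ngrok.set_auth_token(token)
    out.put(ngrok.connect(port, bind_tls=True))

urls: Queue = Queue()
Timer(2, open_tunnel, args=(PORT, urls, "YOUR_NGROK_TOKEN")).start()
print(urls.get())  # blocks until the tunnel exists, then prints its public URL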
utils.py CHANGED

@@ -4,6 +4,7 @@ import random
 import numpy as np
 import json
 import torch
+import uuid
 from PIL import Image, PngImagePlugin
 from datetime import datetime
 from dataclasses import dataclass
@@ -158,12 +159,14 @@ def preprocess_image_dimensions(width, height):
     return width, height
 
 
-def save_image(image, metadata, output_dir):
-    current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
+def save_image(image, metadata, output_dir, is_colab):
+    if is_colab:
+        current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename = f"image_{current_time}.png"
+    else:
+        filename = str(uuid.uuid4()) + ".png"
     os.makedirs(output_dir, exist_ok=True)
-    filename = f"image_{current_time}.png"
     filepath = os.path.join(output_dir, filename)
-
     metadata_str = json.dumps(metadata)
     info = PngImagePlugin.PngInfo()
    info.add_text("metadata", metadata_str)
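
The save_image change keeps writing generation parameters into the PNG itself via a text chunk. A small, hypothetical round-trip check, not from the repo, showing how such a file can be written and the metadata read back with Pillow:

import json
import uuid

from PIL import Image, PngImagePlugin

image = Image.new("RGB", (64, 64), "white")      # stand-in for a generated image
metadata = {"prompt": "1girl", "seed": 42}       # illustrative values only

info = PngImagePlugin.PngInfo()
info.add_text("metadata", json.dumps(metadata))  # same "metadata" key as utils.py

filename = f"{uuid.uuid4()}.png"                 # mirrors the non-Colab branch
image.save(filename, pnginfo=info)

# Pillow exposes PNG text chunks through the .text mapping on reload.
restored = json.loads(Image.open(filename).text["metadata"])
assert restored == metadata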