anzorq committed
Commit 0b1c1d9
1 Parent(s): b8ecb9a

Update app.py

Files changed (1)
  1. app.py +85 -55
app.py CHANGED
@@ -7,9 +7,6 @@ import utils
 
 is_colab = utils.is_google_colab()
 
-max_width = 832
-max_height = 832
-
 class Model:
     def __init__(self, name, path, prefix):
         self.name = name
@@ -26,13 +23,13 @@ models = [
     Model("Classic Disney", "nitrosocke/classic-anim-diffusion", ""),
     Model("Waifu", "hakurei/waifu-diffusion", ""),
     Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
-    Model("Fuyuko Waifu", "yuk/fuyuko-waifu-diffusion", ""),
     Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
     Model("Robo Diffusion", "nousr/robo-diffusion", ""),
     Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
-    Model("Hergé Style", "sd-dreambooth-library/herge-style", "herge_style "),
+    Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy")
 ]
 
+last_mode = "txt2img"
 current_model = models[1]
 current_model_path = current_model.path
 pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
@@ -63,56 +60,66 @@ def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0
 
 def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator=None):
 
+    global last_mode
     global pipe
     global current_model_path
-    if model_path != current_model_path:
+    if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
 
         pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
         if torch.cuda.is_available():
             pipe = pipe.to("cuda")
+        last_mode = "txt2img"
 
     prompt = current_model.prefix + prompt
-    results = pipe(
+    result = pipe(
       prompt,
-      negative_prompt=neg_prompt,
-      num_inference_steps=int(steps),
-      guidance_scale=guidance,
-      width=width,
-      height=height,
-      generator=generator)
+      negative_prompt = neg_prompt,
+      # num_images_per_prompt=n_images,
+      num_inference_steps = int(steps),
+      guidance_scale = guidance,
+      width = width,
+      height = height,
+      generator = generator)
 
-    image = results.images[0] if not results.nsfw_content_detected[0] else Image.open("nsfw.png")
-    return image
+    return replace_nsfw_images(result)
 
-def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
+def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator=None):
 
+    global last_mode
     global pipe
     global current_model_path
-    if model_path != current_model_path:
+    if model_path != current_model_path or last_mode != "img2img":
         current_model_path = model_path
 
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
 
         if torch.cuda.is_available():
             pipe = pipe.to("cuda")
+        last_mode = "img2img"
 
     prompt = current_model.prefix + prompt
-    ratio = min(max_height / img.height, max_width / img.width)
-    img = img.resize((int(img.width * ratio), int(img.height * ratio)))
-    results = pipe(
+    ratio = min(height / img.height, width / img.width)
+    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
+    result = pipe(
       prompt,
-      negative_prompt=neg_prompt,
-      init_image=img,
-      num_inference_steps=int(steps),
-      strength=strength,
-      guidance_scale=guidance,
-      width=width,
-      height=height,
-      generator=generator)
+      negative_prompt = neg_prompt,
+      # num_images_per_prompt=n_images,
+      init_image = img,
+      num_inference_steps = int(steps),
+      strength = strength,
+      guidance_scale = guidance,
+      width = width,
+      height = height,
+      generator = generator)
 
-    image = results.images[0] if not results.nsfw_content_detected[0] else Image.open("nsfw.png")
-    return image
+    return replace_nsfw_images(result)
+
+def replace_nsfw_images(results):
+    for i in range(len(results.images)):
+        if results.nsfw_content_detected[i]:
+            results.images[i] = Image.open("nsfw.png")
+    return results.images[0]
 
 css = """
 <style>
@@ -138,6 +145,13 @@ css = """
   .finetuned-diffusion-div p a {
     text-decoration: underline;
   }
+  .tabs {
+    margin-top: 0px;
+    margin-bottom: 0px;
+  }
+  #gallery {
+    min-height: 20rem;
+  }
 </style>
 """
 with gr.Blocks(css=css) as demo:
@@ -151,7 +165,7 @@ with gr.Blocks(css=css) as demo:
               Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
               <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spiderverse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokemon</a>, <a href="https://huggingface.co/yuk/fuyuko-waifu-diffusion">Fuyuko Waifu</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony</a>, <a href="https://huggingface.co/sd-dreambooth-library/herge-style">Hergé (Tintin)</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a> + any other custom Diffusers 🧨 SD model hosted on HuggingFace 🤗.
               </p>
-              <p>Don't want to wait in queue? ➡️ <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
+              <p>Don't want to wait in queue? <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
              Running on <b>{device}</b>
             </p>
           </div>
@@ -159,42 +173,58 @@ with gr.Blocks(css=css) as demo:
     )
     with gr.Row():
 
-        with gr.Column():
+        with gr.Group():
            model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
            custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", visible=False, interactive=True)
-           prompt = gr.Textbox(label="Prompt", placeholder="Style prefix is applied automatically")
-           run = gr.Button(value="Run")
-
-           with gr.Tab("Options"):
-               neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-               guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-               steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2, step=1)
-               width = gr.Slider(label="Width", value=512, maximum=max_width, minimum=64, step=8)
-               height = gr.Slider(label="Height", value=512, maximum=max_height, minimum=64, step=8)
-               seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
-           with gr.Tab("Image to image"):
-               image = gr.Image(label="Image", height=256, tool="editor", type="pil")
-               strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
-        with gr.Column():
+
+           with gr.Row():
+               prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
+               generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
+
+
            image_out = gr.Image(height=512)
-           log = gr.Textbox()
+           # gallery = gr.Gallery(
+           #     label="Generated images", show_label=False, elem_id="gallery"
+           # ).style(grid=[1], height="auto")
+
+        with gr.Tab("Options"):
+           with gr.Group():
+               neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
+
+               # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
+
+               with gr.Row():
+                   guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+                   steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=100, step=1)
+
+               with gr.Row():
+                   width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
+                   height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
+
+               seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
+        with gr.Tab("Image to image"):
+           with gr.Group():
+               image = gr.Image(label="Image", height=256, tool="editor", type="pil")
+               strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
 
     model_name.change(lambda x: gr.update(visible = x == models[0].name), inputs=model_name, outputs=custom_model_path)
-    custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=log)
+    custom_model_path.change(custom_model_changed, inputs=custom_model_path)
+    # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
+
     inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
-    prompt.submit(inference, inputs=inputs, outputs=image_out, scroll_to_output=True)
-    run.click(inference, inputs=inputs, outputs=image_out, scroll_to_output=True)
-
-    gr.Examples([
+    prompt.submit(inference, inputs=inputs, outputs=image_out)
+    generate.click(inference, inputs=inputs, outputs=image_out)
+
+    ex = gr.Examples([
        [models[1].name, "jason bateman disassembling the demon core", 7.5, 50],
        [models[4].name, "portrait of dwayne johnson", 7.0, 75],
        [models[5].name, "portrait of a beautiful alyx vance half life", 10, 50],
        [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
        [models[5].name, "fantasy portrait painting, digital art", 4.0, 30],
    ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=not is_colab and torch.cuda.is_available())
-
+    # ex.dataset.headers = [""]
+
    gr.Markdown('''
    Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️<br>
    Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social)](https://twitter.com/hahahahohohe)
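
The functional core of this commit is the pipeline caching: txt_to_img and img_to_img now track last_mode alongside current_model_path, so the heavyweight diffusers pipeline is reloaded only when the selected model or the generation mode actually changes. Below is a minimal, self-contained sketch of that pattern, assuming the same diffusers API the Space uses; the get_pipe helper and its globals are illustrative names, not part of the commit.

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

pipe = None                # cached pipeline instance
current_model_path = None  # model repo the cached pipeline was built from
last_mode = None           # "txt2img" or "img2img"

def get_pipe(model_path, mode):
    """Return a cached pipeline, reloading it only when the model or the mode changed."""
    global pipe, current_model_path, last_mode
    if pipe is None or model_path != current_model_path or mode != last_mode:
        cls = StableDiffusionPipeline if mode == "txt2img" else StableDiffusionImg2ImgPipeline
        pipe = cls.from_pretrained(model_path, torch_dtype=torch.float16)
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        current_model_path = model_path
        last_mode = mode
    return pipe

In app.py the same check is inlined in each function rather than factored into a helper, and the new replace_nsfw_images() swaps any image flagged by the safety checker for the nsfw.png placeholder before returning.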