radames committed
Commit 11e5217
1 Parent(s): cd0b70c
Files changed (2):
  1. app.py +120 -30
  2. requirements.txt +6 -6
app.py CHANGED
@@ -5,7 +5,7 @@ import numpy as np
 from diffusers import StableDiffusionDepth2ImgPipeline
 from pathlib import Path
 
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 dept2img = StableDiffusionDepth2ImgPipeline.from_pretrained(
     "stabilityai/stable-diffusion-2-depth",
     torch_dtype=torch.float16,
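Note that the pipeline is loaded with torch_dtype=torch.float16 while device falls back to CPU when CUDA is absent; float16 weights are effectively GPU-only. A minimal dtype-aware variant would look like this (a sketch under that assumption, not part of this commit):

```python
import torch

# Sketch: choose the dtype together with the device, since float16
# inference generally fails or is impractically slow on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32
```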
@@ -13,10 +13,14 @@ dept2img = StableDiffusionDepth2ImgPipeline.from_pretrained(
 
 
 def pad_image(input_image):
-    pad_w, pad_h = np.max(((2, 2), np.ceil(
-        np.array(input_image.size) / 64).astype(int)), axis=0) * 64 - input_image.size
+    pad_w, pad_h = (
+        np.max(((2, 2), np.ceil(np.array(input_image.size) / 64).astype(int)), axis=0)
+        * 64
+        - input_image.size
+    )
     im_padded = Image.fromarray(
-        np.pad(np.array(input_image), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
+        np.pad(np.array(input_image), ((0, pad_h), (0, pad_w), (0, 0)), mode="edge")
+    )
     w, h = im_padded.size
     if w == h:
         return im_padded
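The new multi-line form computes the same padding: each dimension is rounded up to the next multiple of 64 (never below 128), which the depth2img pipeline expects. A quick check of the arithmetic with a hypothetical 500x300 input:

```python
import numpy as np

# Reproduces the pad_w/pad_h computation from pad_image for a 500x300 image.
size = np.array([500, 300])  # PIL's .size is (width, height)
target = np.max(((2, 2), np.ceil(size / 64).astype(int)), axis=0) * 64
pad_w, pad_h = target - size
print(target, pad_w, pad_h)  # [512 320] 12 20
```

The np.pad call above swaps the pair into ((0, pad_h), (0, pad_w), (0, 0)) because a NumPy image array is indexed (height, width, channels).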
@@ -30,7 +34,17 @@ def pad_image(input_image):
         return new_image
 
 
-def predict(input_image, prompt, negative_prompt, steps, num_samples, scale, seed, strength, depth_image=None):
+def predict(
+    input_image,
+    prompt,
+    negative_prompt,
+    steps,
+    num_samples,
+    scale,
+    seed,
+    strength,
+    depth_image=None,
+):
     depth = None
     if depth_image is not None:
         depth_image = pad_image(depth_image)
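The predict signature is only re-wrapped here, one parameter per line; behavior is unchanged. For reference, a hypothetical direct call using the values from one of the gr.Examples rows further down:

```python
from PIL import Image

# Hypothetical call mirroring what the Run button wires up below.
images = predict(
    input_image=Image.open("./examples/bag.jpg"),
    prompt="a photo of a bag of cookies in the bathroom",
    negative_prompt="low light, dark, blurry",
    steps=50,
    num_samples=4,
    scale=9.0,
    seed=1734133747,
    strength=0.9,
    depth_image=Image.open("./examples/depth.jpg"),
)
```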
@@ -56,32 +70,44 @@ def predict(input_image, prompt, negative_prompt, steps, num_samples, scale, see
         guidance_scale=scale,
         num_images_per_prompt=num_samples,
     )
-    return result['images']
+    return result["images"]
 
 
-block = gr.Blocks().queue()
-with block:
+css = """
+#gallery .fixed-height {
+    max-height: unset;
+}
+"""
+with gr.Blocks(css=css) as block:
     with gr.Row():
         with gr.Column():
             gr.Markdown("## Stable Diffusion 2 Depth2Img")
-            gr.HTML("<p><a href='https://huggingface.co/spaces/radames/stable-diffusion-depth2img?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
-
+            gr.HTML(
+                "<p><a href='https://huggingface.co/spaces/radames/stable-diffusion-depth2img?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>"
+            )
 
     with gr.Row():
         with gr.Column():
             input_image = gr.Image(type="pil")
-            depth_image = gr.Image(type="pil", label="Depth Image Optional")
+            with gr.Accordion("Depth Image Optional", open=False):
+                depth_image = gr.Image(type="pil")
             prompt = gr.Textbox(label="Prompt")
             negative_prompt = gr.Textbox(label="Negative Prompt")
 
             run_button = gr.Button("Run")
             with gr.Accordion("Advanced Options", open=False):
                 num_samples = gr.Slider(
-                    label="Images", minimum=1, maximum=4, value=1, step=1)
-                steps = gr.Slider(label="Steps", minimum=1,
-                                  maximum=50, value=50, step=1)
+                    label="Images", minimum=1, maximum=4, value=1, step=1
+                )
+                steps = gr.Slider(
+                    label="Steps", minimum=1, maximum=50, value=50, step=1
+                )
                 scale = gr.Slider(
-                    label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1
+                    label="Guidance Scale",
+                    minimum=0.1,
+                    maximum=30.0,
+                    value=9.0,
+                    step=0.1,
                 )
                 strength = gr.Slider(
                     label="Strength", minimum=0.0, maximum=1.0, value=0.9, step=0.01
@@ -93,26 +119,90 @@ with block:
                 step=1,
                 randomize=True,
             )
-        with gr.Column():
-            gallery = gr.Gallery(label="Generated Images", show_label=False).style(grid=[2])
+        with gr.Column(scale=2):
+            with gr.Row():
+                gallery = gr.Gallery(
+                    label="Generated Images",
+                    show_label=False,
+                    elem_id="gallery",
+                )
     gr.Examples(
         examples=[
-            ["./examples/baby.jpg", "high definition photo of a baby astronaut space walking at the international space station with earth seeing from above in the background",
-                "", 50, 4, 9.0, 123123123, 0.8, None],
-            ["./examples/gol.jpg", "professional photo of a Elmo jumping between two high rises, beautiful colorful city landscape in the background",
-                "", 50, 4, 9.0, 1734133747, 0.9, None],
-            ["./examples/bag.jpg", "a photo of a bag of cookies in the bathroom", "low light, dark, blurry", 50, 4, 9.0, 1734133747, 0.9, "./examples/depth.jpg"],
-            ["./examples/smile_face.jpg", "a hand holding a very spherical orange", "low light, dark, blurry", 50, 4, 6.0, 961736534, 0.5, "./examples/smile_depth.jpg"]
-
+            [
+                "./examples/baby.jpg",
+                "high definition photo of a baby astronaut space walking at the international space station with earth seeing from above in the background",
+                "",
+                50,
+                4,
+                9.0,
+                123123123,
+                0.8,
+                None,
+            ],
+            [
+                "./examples/gol.jpg",
+                "professional photo of a Elmo jumping between two high rises, beautiful colorful city landscape in the background",
+                "",
+                50,
+                4,
+                9.0,
+                1734133747,
+                0.9,
+                None,
+            ],
+            [
+                "./examples/bag.jpg",
+                "a photo of a bag of cookies in the bathroom",
+                "low light, dark, blurry",
+                50,
+                4,
+                9.0,
+                1734133747,
+                0.9,
+                "./examples/depth.jpg",
+            ],
+            [
+                "./examples/smile_face.jpg",
+                "a hand holding a very spherical orange",
+                "low light, dark, blurry",
+                50,
+                4,
+                6.0,
+                961736534,
+                0.5,
+                "./examples/smile_depth.jpg",
+            ],
+        ],
+        inputs=[
+            input_image,
+            prompt,
+            negative_prompt,
+            steps,
+            num_samples,
+            scale,
+            seed,
+            strength,
+            depth_image,
         ],
-        inputs=[input_image, prompt, negative_prompt, steps,
-                num_samples, scale, seed, strength, depth_image],
         outputs=[gallery],
         fn=predict,
         cache_examples=True,
     )
-    run_button.click(fn=predict, inputs=[input_image, prompt, negative_prompt,
-                     steps, num_samples, scale, seed, strength, depth_image], outputs=[gallery])
-
+    run_button.click(
+        fn=predict,
+        inputs=[
+            input_image,
+            prompt,
+            negative_prompt,
+            steps,
+            num_samples,
+            scale,
+            seed,
+            strength,
+            depth_image,
+        ],
+        outputs=[gallery],
+    )
 
-block.launch(show_api=False)
+block.queue(api_open=False)
+block.launch(show_api=False)
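Two of the rewrites track the gradio 3 to 4 API change: Gallery.style() no longer exists, and queuing is configured on the Blocks object before launch instead of being chained at construction. The removed grid layout would be expressed today roughly as follows (a sketch, assuming gradio 4.x; this commit uses elem_id plus css instead):

```python
import gradio as gr

# Rough gradio 4.x equivalent of the removed .style(grid=[2]) call.
gallery = gr.Gallery(label="Generated Images", show_label=False, columns=2)
```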
requirements.txt CHANGED
@@ -1,6 +1,6 @@
-gradio
-diffusers
-transformers
-torch
-accelerate
-xformers
+diffusers==0.24.0
+gradio==4.9.1
+numpy==1.26.2
+Pillow==10.1.0
+Pillow==10.1.0
+torch==2.1.2
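Pinning exact versions makes Space rebuilds reproducible; transformers, accelerate, and xformers are dropped from the list entirely. A quick runtime check of what resolved (a sketch, not part of the commit):

```python
import importlib.metadata as md

# Print the installed version of each pinned dependency.
for pkg in ("diffusers", "gradio", "numpy", "Pillow", "torch"):
    print(pkg, md.version(pkg))
```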