nick_93 committed
Commit f493b13
1 Parent(s): 8df3de7
Files changed (1):
  1. app.py +37 -35
app.py CHANGED
@@ -19,6 +19,34 @@ device = "cuda"
 dtype = torch.float16
 
+controlnet_depth= ControlNetModel.from_pretrained(
+    "controlnet_depth", torch_dtype=dtype, use_safetensors=True)
+controlnet_seg = ControlNetModel.from_pretrained(
+    "own_controlnet", torch_dtype=dtype, use_safetensors=True)
+
+pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
+    "SG161222/Realistic_Vision_V5.1_noVAE",
+    #"models/runwayml--stable-diffusion-inpainting",
+    controlnet=[controlnet_depth, controlnet_seg],
+    safety_checker=None,
+    torch_dtype=dtype
+)
+
+pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models",
+                     weight_name="ip-adapter_sd15.bin")
+pipe.set_ip_adapter_scale(0.4)
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe = pipe.to(device)
+guide_pipe = StableDiffusionXLPipeline.from_pretrained("segmind/SSD-1B",
+                                                       torch_dtype=dtype, use_safetensors=True, variant="fp16")
+guide_pipe = guide_pipe.to(device)
+
+seg_image_processor, image_segmentor = get_segmentation_pipeline()
+depth_feature_extractor, depth_estimator = get_depth_pipeline()
+depth_estimator = depth_estimator.to(device)
+
+
+
 css = """
 #img-display-container {
     max-height: 50vh;
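This hunk, together with the `__init__` removals further down, hoists all of the heavy model loading out of `ControlNetDepthDesignModelMulti` and into module scope. That is the usual pattern for Hugging Face ZeroGPU Spaces: weights are instantiated once at import time, and functions decorated with `@spaces.GPU` only borrow the GPU per call. A minimal sketch of the pattern (the model id and `spaces` usage come from the diff; the demo function is hypothetical):

```python
import spaces
import torch
from diffusers import StableDiffusionXLPipeline

device = "cuda"
dtype = torch.float16

# Loaded once at import time; every @spaces.GPU call below reuses it.
guide_pipe = StableDiffusionXLPipeline.from_pretrained(
    "segmind/SSD-1B", torch_dtype=dtype, use_safetensors=True, variant="fp16"
).to(device)

@spaces.GPU
def generate(prompt: str):
    # On ZeroGPU the GPU is attached only for the duration of this call.
    return guide_pipe(prompt).images[0]
```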
@@ -78,8 +106,8 @@ def get_segmentation_pipeline(
 @spaces.GPU
 def segment_image(
         image: Image,
-        image_processor: AutoImageProcessor,
-        image_segmentor: UperNetForSemanticSegmentation
+        #image_processor: AutoImageProcessor,
+        #image_segmentor: UperNetForSemanticSegmentation
 ) -> Image:
     """
     Segments an image using a semantic segmentation model.
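With the processor and segmentor now living at module scope, `segment_image` keeps only its `image` parameter; the old parameters are commented out rather than deleted. For reference, a hedged sketch of a body consistent with the new signature, assuming the standard UperNet segmentation recipe and an `ade_palette` helper (neither is shown in this diff):

```python
import numpy as np
import torch
from PIL import Image

@spaces.GPU
def segment_image(image: Image) -> Image:
    # seg_image_processor / image_segmentor are the module-level globals
    # created in the first hunk; ade_palette() is an assumed helper that
    # maps ADE20K class indices to RGB colors.
    pixel_values = seg_image_processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = image_segmentor(pixel_values)
    seg = seg_image_processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]])[0]
    seg = seg.cpu().numpy()
    color_seg = np.zeros((*seg.shape, 3), dtype=np.uint8)
    for label, color in enumerate(ade_palette()):
        color_seg[seg == label] = color
    return Image.fromarray(color_seg).convert("RGB")
```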
@@ -182,38 +210,12 @@ class ControlNetDepthDesignModelMulti:
     def __init__(self):
         """ Initialize your model(s) here """
         #os.environ['HF_HUB_OFFLINE'] = "True"
-        controlnet_depth= ControlNetModel.from_pretrained(
-            "controlnet_depth", torch_dtype=dtype, use_safetensors=True)
-        controlnet_seg = ControlNetModel.from_pretrained(
-            "own_controlnet", torch_dtype=dtype, use_safetensors=True)
-
-        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            "SG161222/Realistic_Vision_V5.1_noVAE",
-            #"models/runwayml--stable-diffusion-inpainting",
-            controlnet=[controlnet_depth, controlnet_seg],
-            safety_checker=None,
-            torch_dtype=dtype
-        )
-
-        self.pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models",
-                                  weight_name="ip-adapter_sd15.bin")
-        self.pipe.set_ip_adapter_scale(0.4)
-        self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
-        self.pipe = self.pipe.to(device)
-        self.guide_pipe = StableDiffusionXLPipeline.from_pretrained("segmind/SSD-1B",
-                                                                    torch_dtype=dtype, use_safetensors=True, variant="fp16")
-        self.guide_pipe = self.guide_pipe.to(device)
-
+
         self.seed = 323*111
         self.neg_prompt = "window, door, low resolution, banner, logo, watermark, text, deformed, blurry, out of focus, surreal, ugly, beginner"
         self.control_items = ["windowpane;window", "door;double;door"]
         self.additional_quality_suffix = "interior design, 4K, high resolution, photorealistic"
 
-        self.seg_image_processor, self.image_segmentor = get_segmentation_pipeline()
-        self.depth_feature_extractor, self.depth_estimator = get_depth_pipeline()
-        self.depth_estimator = self.depth_estimator.to(device)
-
-
     @spaces.GPU
     def generate_design(self, empty_room_image: Image, prompt: str, guidance_scale: int = 10, num_steps: int = 50, strength: float =0.9, img_size: int = 640) -> Image:
         """
@@ -235,9 +237,9 @@ class ControlNetDepthDesignModelMulti:
         orig_w, orig_h = empty_room_image.size
         new_width, new_height = resize_dimensions(empty_room_image.size, img_size)
         input_image = empty_room_image.resize((new_width, new_height))
-        real_seg = np.array(segment_image(input_image,
-                                          self.seg_image_processor,
-                                          self.image_segmentor))
+        real_seg = np.array(segment_image(input_image))#,
+                                          #seg_image_processor,
+                                          #image_segmentor))
         unique_colors = np.unique(real_seg.reshape(-1, real_seg.shape[2]), axis=0)
         unique_colors = [tuple(color) for color in unique_colors]
         segment_items = [map_colors_rgb(i) for i in unique_colors]
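The unique-color step collapses the color-coded segmentation map into its set of distinct class colors, which `map_colors_rgb` then turns into label names. The same logic in isolation (values illustrative; (120, 120, 120) is 'wall' in the ADE20K palette):

```python
import numpy as np

real_seg = np.zeros((4, 4, 3), dtype=np.uint8)  # all zeros = one "class"
real_seg[:2] = (120, 120, 120)                  # 'wall' in ADE20K colors
unique_colors = np.unique(real_seg.reshape(-1, real_seg.shape[2]), axis=0)
print([tuple(int(v) for v in c) for c in unique_colors])
# -> [(0, 0, 0), (120, 120, 120)]
```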
@@ -256,13 +258,13 @@ class ControlNetDepthDesignModelMulti:
         mask_image = Image.fromarray((mask * 255).astype(np.uint8)).convert("RGB")
         segmentation_cond_image = Image.fromarray(real_seg).convert("RGB")
 
-        image_depth = get_depth_image(image, self.depth_feature_extractor, self.depth_estimator)
+        image_depth = get_depth_image(image, depth_feature_extractor, depth_estimator)
 
         # generate image that would be used as IP-adapter
         flush()
         new_width_ip = int(new_width / 8) * 8
         new_height_ip = int(new_height / 8) * 8
-        ip_image = self.guide_pipe(pos_prompt,
+        ip_image = guide_pipe(pos_prompt,
                                    num_inference_steps=num_steps,
                                    negative_prompt=self.neg_prompt,
                                    height=new_height_ip,
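The `int(x / 8) * 8` snap-down exists because Stable Diffusion pipelines require width and height divisible by 8 (the VAE and U-Net downsample by that factor), so the IP-adapter guide image is rounded to the nearest smaller multiple:

```python
new_width, new_height = 637, 421
new_width_ip = int(new_width / 8) * 8    # 632
new_height_ip = int(new_height / 8) * 8  # 416
```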
@@ -270,7 +272,7 @@ class ControlNetDepthDesignModelMulti:
                                    generator=[self.generator]).images[0]
 
         flush()
-        generated_image = self.pipe(
+        generated_image = pipe(
             prompt=pos_prompt,
             negative_prompt=self.neg_prompt,
             num_inference_steps=num_steps,
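The call continues past the hunk boundary. A hedged sketch of the full inpainting invocation, using argument names that `StableDiffusionControlNetInpaintPipeline` accepts; everything after `num_inference_steps` is assumed rather than read from this diff:

```python
# Sketch only: the arguments below the hunk boundary are assumptions.
generated_image = pipe(
    prompt=pos_prompt,
    negative_prompt=self.neg_prompt,
    num_inference_steps=num_steps,
    strength=strength,
    guidance_scale=guidance_scale,
    generator=[self.generator],
    image=image,
    mask_image=mask_image,
    control_image=[image_depth, segmentation_cond_image],
    ip_adapter_image=ip_image,
).images[0]
```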
 