nick_93 committed on
Commit
93b410f
1 Parent(s): f732e49
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -75,6 +75,7 @@ def get_segmentation_pipeline(
75
 
76
 
77
  @torch.inference_mode()
 
78
  def segment_image(
79
  image: Image,
80
  image_processor: AutoImageProcessor,
@@ -114,11 +115,12 @@ def get_depth_pipeline():
114
  feature_extractor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-large-hf",
115
  torch_dtype=dtype)
116
  depth_estimator = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-large-hf",
117
- torch_dtype=dtype).to(device)
118
  return feature_extractor, depth_estimator
119
 
120
 
121
  @torch.inference_mode()
 
122
  def get_depth_image(
123
  image: Image,
124
  feature_extractor: AutoImageProcessor,
@@ -209,6 +211,7 @@ class ControlNetDepthDesignModelMulti:
209
 
210
  self.seg_image_processor, self.image_segmentor = get_segmentation_pipeline()
211
  self.depth_feature_extractor, self.depth_estimator = get_depth_pipeline()
 
212
 
213
  @spaces.GPU
214
  def generate_design(self, empty_room_image: Image, prompt: str, guidance_scale: int = 10, num_steps: int = 50, strength: float =0.9, img_size: int = 640) -> Image:
@@ -223,6 +226,8 @@ class ControlNetDepthDesignModelMulti:
223
  If the size is not the same the submission will fail.
224
  """
225
  print(prompt)
 
 
226
  flush()
227
  self.generator = torch.Generator(device=device).manual_seed(self.seed)
228
 
 
75
 
76
 
77
  @torch.inference_mode()
78
+ @spaces.GPU
79
  def segment_image(
80
  image: Image,
81
  image_processor: AutoImageProcessor,
 
115
  feature_extractor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-large-hf",
116
  torch_dtype=dtype)
117
  depth_estimator = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-large-hf",
118
+ torch_dtype=dtype)
119
  return feature_extractor, depth_estimator
120
 
121
 
122
  @torch.inference_mode()
123
+ @spaces.GPU
124
  def get_depth_image(
125
  image: Image,
126
  feature_extractor: AutoImageProcessor,
 
211
 
212
  self.seg_image_processor, self.image_segmentor = get_segmentation_pipeline()
213
  self.depth_feature_extractor, self.depth_estimator = get_depth_pipeline()
214
+ self.depth_estimator = self.depth_estimator.to(device)
215
 
216
  @spaces.GPU
217
  def generate_design(self, empty_room_image: Image, prompt: str, guidance_scale: int = 10, num_steps: int = 50, strength: float =0.9, img_size: int = 640) -> Image:
 
226
  If the size is not the same the submission will fail.
227
  """
228
  print(prompt)
229
+ print(self.depth_estimator.device)
230
+ print(self.image_segmentor.device)
231
  flush()
232
  self.generator = torch.Generator(device=device).manual_seed(self.seed)
233