Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,198 +1,198 @@
-import
+import spaces

The only change is line 1: a bare "import" statement, a syntax error that kept the app from starting, becomes "import spaces". The rest of the file is unchanged. The updated app.py in full:

import spaces
import torch
import numpy as np
from transformers import AutoImageProcessor, AutoModelForDepthEstimation
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import natten  # NATTEN provides the neighborhood-attention ops needed by the DiNAT OneFormer backbone
import gradio as gr
from PIL import Image


"""
LOAD MODELS
"""

# depth estimation model (produces the depth control image)
depth_image_processor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-large-hf", torch_dtype=torch.float16)
depth_model = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-large-hf", torch_dtype=torch.float16)
depth_model = depth_model.cuda()

# semantic segmentation model (produces the segmentation control image)
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_dinat_large")
model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_dinat_large")
model = model.cuda()
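# Note: on a ZeroGPU Space, importing `spaces` first lets these top-level .cuda() calls
# succeed even though the GPU is only attached while a @spaces.GPU-decorated function runs.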

# image generation pipeline

# load the depth ControlNet and the segmentation ControlNet
controlnets = [
    ControlNetModel.from_pretrained("Lam-Hung/controlnet_depth_interior", torch_dtype=torch.float16, use_safetensors=True),
    ControlNetModel.from_pretrained("Lam-Hung/controlnet_segment_interior", torch_dtype=torch.float16, use_safetensors=True),
]
# load Stable Diffusion 1.5 together with both ControlNets
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnets, torch_dtype=torch.float16, use_safetensors=True
)
# use UniPCMultistepScheduler for faster inference
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline.load_lora_weights("Lam-Hung/controlnet_lora_interior", weight_name="pytorch_lora_weights.safetensors", adapter_name="interior")
pipeline.to("cuda")
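# The ControlNets are kept in a fixed order (depth first, segmentation second); the control
# images and controlnet_conditioning_scale passed to the pipeline below follow the same order.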


"""
HELPER FUNCTIONS
"""
def ade_palette() -> list[list[int]]:
    """ADE20K palette that maps each class to RGB values."""
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
            [102, 255, 0], [92, 0, 255]]
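# ade_palette() returns 150 RGB triplets, one per ADE20K class index, matching the label
# ids predicted by the OneFormer model loaded above.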


@spaces.GPU
@torch.inference_mode()
def get_depth_image(image: Image.Image) -> Image.Image:
    """Create the depth control image."""
    image_to_depth = depth_image_processor(images=image, return_tensors="pt").to("cuda")
    with torch.no_grad():
        depth_map = depth_model(**image_to_depth).predicted_depth

    # resize the predicted depth map back to the input resolution
    width, height = image.size
    depth_map = torch.nn.functional.interpolate(
        depth_map.unsqueeze(1).float(),
        size=(height, width),
        mode="bicubic",
        align_corners=False,
    )
    # min-max normalize to [0, 1], then replicate to three channels for ControlNet
    depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
    depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
    depth_map = (depth_map - depth_min) / (depth_max - depth_min)
    image = torch.cat([depth_map] * 3, dim=1)

    image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
    image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
    return image

@spaces.GPU
@torch.inference_mode()
def get_segmentation_of_room(image: Image.Image) -> Image.Image:
    """Create the semantic segmentation control image."""
    # semantic segmentation
    semantic_inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt")
    semantic_inputs = {key: value.to("cuda") for key, value in semantic_inputs.items()}
    semantic_outputs = model(**semantic_inputs)
    # post-process to a per-pixel class-index map at the input resolution
    predicted_semantic_map = \
        processor.post_process_semantic_segmentation(semantic_outputs, target_sizes=[image.size[::-1]])[0]

    predicted_semantic_map = predicted_semantic_map.cpu()
    color_seg = np.zeros((predicted_semantic_map.shape[0], predicted_semantic_map.shape[1], 3), dtype=np.uint8)

    # paint every class label with its ADE20K palette color
    palette = np.array(ade_palette())
    for label, color in enumerate(palette):
        color_seg[predicted_semantic_map == label, :] = color

    seg_image = Image.fromarray(color_seg).convert("RGB")
    return seg_image
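# Because OneFormer is queried with task_inputs=["semantic"], post-processing yields a
# (height, width) map of ADE20K class indices, which the loop above colorizes.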

@spaces.GPU
@torch.inference_mode()
def interior_inference(image,
                       prompt,
                       negative_prompt="window, door, low resolution, banner, logo, watermark, text, deformed, blurry, out of focus, surreal, ugly, beginner",
                       num_inference_steps=25,
                       depth_weight=0.9,
                       segment_weight=0.9,
                       lora_weight=0.7,
                       seed=123):
    """Generate an interior design from a photo of an empty room."""
    depth_image = get_depth_image(image)
    segmentation_image = get_segmentation_of_room(image)
    prompt = prompt + " interior design, 4K, high resolution, photorealistic"

    image_interior = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        image=[depth_image, segmentation_image],
        num_inference_steps=num_inference_steps,
        generator=torch.manual_seed(seed),
        # scale of the "interior" LoRA adapter
        cross_attention_kwargs={"scale": lora_weight},
        controlnet_conditioning_scale=[depth_weight, segment_weight],
    ).images[0]

    return image_interior
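# Hypothetical direct call, bypassing the Gradio UI (assumes a local room.jpg):
#   design = interior_inference(Image.open("room.jpg"), "a cozy scandinavian living room")
#   design.save("interior.png")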

interface = gr.Interface(
    fn=interior_inference,
    inputs=[
        gr.Image(type="pil", label="Empty room image", show_label=True),
        gr.Textbox(label="Prompt", lines=3, placeholder="Enter your prompt here"),
    ],
    outputs=[
        gr.Image(type="pil", label="Interior design", show_label=True),
    ],
    additional_inputs=[
        gr.Textbox(label="Negative prompt", lines=3, placeholder="Enter your negative prompt here"),
        gr.Slider(label="Number of inference steps", minimum=1, maximum=100, value=25, step=1),
        gr.Slider(label="Depth weight", minimum=0, maximum=1, value=0.9, step=0.1),
        gr.Slider(label="Segment weight", minimum=0, maximum=1, value=0.9, step=0.1),
        gr.Slider(label="LoRA weight", minimum=0, maximum=1, value=0.7, step=0.1),
        gr.Number(label="Seed", value=123),
    ],
    title="INTERIOR DESIGN",
    description="We will turn your empty room into a beautifully designed one.",
)

if __name__ == "__main__":
    interface.launch()