Spaces: Runtime error
- apps/third_party/Wonder3D/mvdiffusion/data/objaverse_dataset.py +1 -1
- craftsman/data/__pycache__/objaverse.cpython-38.pyc +0 -0
- craftsman/data/objaverse.py +4 -4
- craftsman/models/conditional_encoders/clip/__pycache__/modeling_clip.cpython-38.pyc +0 -0
- craftsman/models/conditional_encoders/clip/modeling_clip.py +1 -1
- craftsman/models/geometry/__pycache__/base.cpython-38.pyc +0 -0
- craftsman/models/geometry/base.py +1 -1
- craftsman/systems/__pycache__/shape_diffusion.cpython-38.pyc +0 -0
- craftsman/systems/shape_diffusion.py +1 -1
- craftsman/utils/__pycache__/base.cpython-38.pyc +0 -0
- craftsman/utils/__pycache__/config.cpython-38.pyc +0 -0
- craftsman/utils/__pycache__/misc.cpython-38.pyc +0 -0
- craftsman/utils/base.py +2 -2
- craftsman/utils/config.py +4 -4
- craftsman/utils/misc.py +1 -1
- gradio_app.py +2 -2
apps/third_party/Wonder3D/mvdiffusion/data/objaverse_dataset.py
CHANGED
@@ -25,7 +25,7 @@ class ObjaverseDataset(Dataset):
     def __init__(self,
         root_dir: str,
         num_views: int,
-        bg_color
+        bg_color,
         img_wh: Tuple[int, int],
         object_list: str,
         groups_num: int=1,
craftsman/data/__pycache__/objaverse.cpython-38.pyc
CHANGED
Binary files a/craftsman/data/__pycache__/objaverse.cpython-38.pyc and b/craftsman/data/__pycache__/objaverse.cpython-38.pyc differ
craftsman/data/objaverse.py
CHANGED
@@ -101,7 +101,7 @@ class ObjaverseDataset(Dataset):
     def __len__(self):
         return len(self.uids)
 
-    def _load_shape(self, index: int)
+    def _load_shape(self, index: int):
         if self.cfg.data_type == "occupancy":
             # for input point cloud
             pointcloud = np.load(f'{self.cfg.root_dir}/{self.uids[index]}/pointcloud.npz')
@@ -130,7 +130,7 @@ class ObjaverseDataset(Dataset):
 
         return ret
 
-    def _load_shape_supervision(self, index: int)
+    def _load_shape_supervision(self, index: int):
         # for supervision
         ret = {}
         if self.cfg.data_type == "occupancy":
@@ -166,7 +166,7 @@ class ObjaverseDataset(Dataset):
 
         return ret
 
-    def _load_image(self, index: int)
+    def _load_image(self, index: int):
         def _load_single_image(img_path):
             img = torch.from_numpy(
                 np.asarray(
@@ -209,7 +209,7 @@ class ObjaverseDataset(Dataset):
 
         return ret
 
-    def _load_caption(self, index: int, drop_text_embed: bool = False)
+    def _load_caption(self, index: int, drop_text_embed: bool = False):
         ret = {}
         if self.cfg.caption_type == "text":
             caption = eval(json.load(open(f'{self.cfg.image_data_path}/' + "/".join(self.uids[index].split('/')[-2:]) + f'/annotation.json')))
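All four hunks above do the same thing: they restore the trailing colon on a method definition. A def header without its colon is a hard SyntaxError, so craftsman/data/objaverse.py could not even be imported, which would be consistent with the Space's "Runtime error" status. A minimal, self-contained reproduction of that failure mode (illustrative only, not this repo's code):

import textwrap

# Compile a snippet shaped like the broken pre-fix code: the def is missing its colon.
broken = textwrap.dedent("""
    class Dataset:
        def _load_shape(self, index: int)
            return index
""")

try:
    compile(broken, "<objaverse.py>", "exec")
except SyntaxError as err:
    print(f"SyntaxError: {err.msg} (line {err.lineno})")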
craftsman/models/conditional_encoders/clip/__pycache__/modeling_clip.cpython-38.pyc
CHANGED
Binary files a/craftsman/models/conditional_encoders/clip/__pycache__/modeling_clip.cpython-38.pyc and b/craftsman/models/conditional_encoders/clip/__pycache__/modeling_clip.cpython-38.pyc differ
craftsman/models/conditional_encoders/clip/modeling_clip.py
CHANGED
@@ -149,7 +149,7 @@ class CLIPOutput(ModelOutput):
     text_model_output: BaseModelOutputWithPooling = None
     vision_model_output: BaseModelOutputWithPooling = None
 
-    def to_tuple(self)
+    def to_tuple(self):
         return tuple(
             self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
             for k in self.keys()
craftsman/models/geometry/__pycache__/base.cpython-38.pyc
CHANGED
Binary files a/craftsman/models/geometry/__pycache__/base.cpython-38.pyc and b/craftsman/models/geometry/__pycache__/base.cpython-38.pyc differ
craftsman/models/geometry/base.py
CHANGED
@@ -32,7 +32,7 @@ class BaseGeometry(BaseModule):
                 f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
             )
 
-    def export(self, *args, **kwargs)
+    def export(self, *args, **kwargs):
         return {}
 
 
craftsman/systems/__pycache__/shape_diffusion.cpython-38.pyc
CHANGED
Binary files a/craftsman/systems/__pycache__/shape_diffusion.cpython-38.pyc and b/craftsman/systems/__pycache__/shape_diffusion.cpython-38.pyc differ
craftsman/systems/shape_diffusion.py
CHANGED
@@ -163,7 +163,7 @@ class ShapeDiffusionSystem(BaseSystem):
 
         self.z_scale_factor = self.cfg.z_scale_factor
 
-    def forward(self, batch: Dict[str, Any])
+    def forward(self, batch: Dict[str, Any]):
         # encode shape latents
         shape_embeds, kl_embed, posterior = self.shape_model.encode(
             batch["surface"][..., :3 + self.cfg.shape_model.point_feats],
craftsman/utils/__pycache__/base.cpython-38.pyc
CHANGED
Binary files a/craftsman/utils/__pycache__/base.cpython-38.pyc and b/craftsman/utils/__pycache__/base.cpython-38.pyc differ
craftsman/utils/__pycache__/config.cpython-38.pyc
CHANGED
Binary files a/craftsman/utils/__pycache__/config.cpython-38.pyc and b/craftsman/utils/__pycache__/config.cpython-38.pyc differ
craftsman/utils/__pycache__/misc.cpython-38.pyc
CHANGED
Binary files a/craftsman/utils/__pycache__/misc.cpython-38.pyc and b/craftsman/utils/__pycache__/misc.cpython-38.pyc differ
craftsman/utils/base.py
CHANGED
@@ -57,12 +57,12 @@ class Updateable:
         pass
 
 
-def update_if_possible(module
+def update_if_possible(module, epoch: int, global_step: int) -> None:
     if isinstance(module, Updateable):
         module.do_update_step(epoch, global_step)
 
 
-def update_end_if_possible(module
+def update_end_if_possible(module, epoch: int, global_step: int) -> None:
     if isinstance(module, Updateable):
         module.do_update_step_end(epoch, global_step)
 
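The two restored helpers are thin dispatchers: they forward (epoch, global_step) to any object that opts in via the Updateable interface and silently skip everything else. A minimal sketch of how they are typically driven from a training loop follows; update_if_possible and update_end_if_possible mirror the diff above, while the Warmup module and the loop are illustrative assumptions, not code from this repo.

class Updateable:
    # Interface restored in craftsman/utils/base.py: subclasses override these hooks.
    def do_update_step(self, epoch: int, global_step: int):
        pass

    def do_update_step_end(self, epoch: int, global_step: int):
        pass


def update_if_possible(module, epoch: int, global_step: int) -> None:
    if isinstance(module, Updateable):
        module.do_update_step(epoch, global_step)


def update_end_if_possible(module, epoch: int, global_step: int) -> None:
    if isinstance(module, Updateable):
        module.do_update_step_end(epoch, global_step)


class Warmup(Updateable):
    # Hypothetical module whose weight ramps up over the first 100 steps.
    def __init__(self):
        self.weight = 0.0

    def do_update_step(self, epoch: int, global_step: int):
        self.weight = min(1.0, global_step / 100)


warmup = Warmup()
for step in range(3):
    update_if_possible(warmup, epoch=0, global_step=step)      # before the training step
    update_end_if_possible(warmup, epoch=0, global_step=step)  # after the training step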
craftsman/utils/config.py
CHANGED
@@ -28,7 +28,7 @@ OmegaConf.register_new_resolver(
 # ======================================================= #
 
 
-def C_max(value
+def C_max(value) -> float:
     if isinstance(value, int) or isinstance(value, float):
         pass
     else:
@@ -101,7 +101,7 @@ class ExperimentConfig:
         os.makedirs(self.trial_dir, exist_ok=True)
 
 
-def load_config(*yamls: str, cli_args: list = [], from_string=False, **kwargs)
+def load_config(*yamls: str, cli_args: list = [], from_string=False, **kwargs):
     if from_string:
         yaml_confs = [OmegaConf.create(s) for s in yamls]
     else:
@@ -114,7 +114,7 @@ def load_config(*yamls: str, cli_args: list = [], from_string=False, **kwargs) -
     return scfg
 
 
-def config_to_primitive(config, resolve: bool = True)
+def config_to_primitive(config, resolve: bool = True):
     return OmegaConf.to_container(config, resolve=resolve)
 
 
@@ -123,6 +123,6 @@ def dump_config(path: str, config) -> None:
     OmegaConf.save(config=config, f=fp)
 
 
-def parse_structured(fields
+def parse_structured(fields, cfg: Optional[Union[dict, DictConfig]] = None):
     scfg = OmegaConf.structured(fields(**cfg))
     return scfg
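The restored parse_structured signature also shows how configs are materialized here: a plain dict (typically parsed from YAML) is splatted into a dataclass and wrapped with OmegaConf.structured so later accesses and merges are type-checked against the declared fields. A hedged usage sketch follows; the DemoConfig dataclass and its fields are hypothetical, and only the parse_structured body is taken from the diff above (the type annotation is dropped to keep the sketch self-contained).

from dataclasses import dataclass
from omegaconf import OmegaConf

@dataclass
class DemoConfig:
    # Hypothetical config schema, for illustration only.
    root_dir: str = ""
    num_views: int = 4

def parse_structured(fields, cfg=None):
    # Instantiate the dataclass from a plain dict, then wrap it so OmegaConf
    # validates types on access and on later merges.
    scfg = OmegaConf.structured(fields(**cfg))
    return scfg

cfg = parse_structured(DemoConfig, {"root_dir": "/data/objaverse", "num_views": 8})
print(cfg.num_views)          # 8
print(OmegaConf.to_yaml(cfg))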
craftsman/utils/misc.py
CHANGED
@@ -70,7 +70,7 @@ def load_module_weights(
     return state_dict_to_load, ckpt["epoch"], ckpt["global_step"]
 
 
-def C(value
+def C(value, epoch: int, global_step: int) -> float:
     if isinstance(value, int) or isinstance(value, float):
         pass
    else:
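This hunk only restores the signature of C and its leading type check; the rest of the body lies outside the hunk. In threestudio-style codebases a helper with this signature usually interpolates a scheduled config value over training, so as a rough, explicitly assumed illustration (not taken from this diff) such a helper can look like this:

def scheduled_value(value, epoch: int, global_step: int) -> float:
    # Plain numbers pass through unchanged, matching the isinstance check above.
    if isinstance(value, (int, float)):
        return float(value)
    # Assumed list convention: [start_step, start_value, end_value, end_step].
    start_step, start_value, end_value, end_step = value
    span = max(end_step - start_step, 1)
    t = min(max((global_step - start_step) / span, 0.0), 1.0)
    return start_value + (end_value - start_value) * t

print(scheduled_value(0.5, epoch=0, global_step=10))                 # 0.5
print(scheduled_value([0, 0.0, 1.0, 100], epoch=0, global_step=50))  # 0.5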
gradio_app.py
CHANGED
@@ -257,8 +257,8 @@ if __name__=="__main__":
         with gr.Row():
             background_choice = gr.Dropdown(label="Backgroud Choice", value="Auto Remove Background",choices=list(background_choice.keys()))
             rmbg_type = gr.Dropdown(label="Backgroud Remove Type", value="rembg",choices=['sam', "rembg"])
-            backgroud_color = gr.ColorPicker(label="Background Color", value="#FFFFFF", interactive=True)
-
+            # backgroud_color = gr.ColorPicker(label="Background Color", value="#FFFFFF", interactive=True)
+            backgroud_color = gr.ColorPicker(label="Background Color", value="#7F7F7F", interactive=True)
 
         with gr.Row():
             mvimg_guidance_scale = gr.Number(value=3.5, minimum=3, maximum=10, label="2D Guidance Scale")
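The only functional change in gradio_app.py is the ColorPicker default moving from white (#FFFFFF) to mid-gray (#7F7F7F); the old line is kept as a comment. gr.ColorPicker typically delivers its value to the handler as a hex string such as "#7F7F7F", so any downstream compositing step would convert it roughly like this (illustrative helper, not taken from gradio_app.py):

def hex_to_rgb(hex_color: str) -> tuple:
    # "#7F7F7F" -> (127, 127, 127)
    h = hex_color.lstrip("#")
    return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))

print(hex_to_rgb("#FFFFFF"))  # (255, 255, 255) -- old default
print(hex_to_rgb("#7F7F7F"))  # (127, 127, 127) -- new default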