waveydaveygravy committed
Commit: a545358 · 1 parent: 9600e7d
Update app.py
app.py CHANGED
@@ -32,7 +32,7 @@ from src.audio2vid import get_headpose_temp, smooth_pose_seq
 from src.utils.frame_interpolation import init_frame_interpolation_model, batch_images_interpolation_tool
 
 
-config = OmegaConf.load('
+config = OmegaConf.load('/content/AniPortrait_official/configs/prompts/animation_audio.yaml')
 if config.weight_dtype == "fp16":
     weight_dtype = torch.float16
 else:
@@ -167,7 +167,7 @@ def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, l
     # [transforms.Resize((height, width)), transforms.ToTensor()]
     # )
     args_L = len(pose_images) if length==0 or length > len(pose_images) else length
-    args_L = min(args_L,
+    #args_L = min(args_L, 9999)
     for pose_image_np in pose_images[: args_L : fi_step]:
         # pose_image_pil = Image.fromarray(cv2.cvtColor(pose_image_np, cv2.COLOR_BGR2RGB))
         # pose_tensor_list.append(pose_transform(pose_image_pil))
@@ -263,7 +263,7 @@ def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     verts_list = []
     bs_list = []
     args_L = len(source_images) if length==0 or length*step > len(source_images) else length*step
-    args_L = min(args_L, 90*step)
+    #args_L = min(args_L, 90*step)
     for src_image_pil in source_images[: args_L : step*fi_step]:
         src_img_np = cv2.cvtColor(np.array(src_image_pil), cv2.COLOR_RGB2BGR)
         frame_height, frame_width, _ = src_img_np.shape
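The two hunks in audio2video and video2video above only comment out a hard cap on how many frames are processed. As a rough, hypothetical illustration of what that cap was doing (the helper function and toy data below are not code from app.py; only the names pose_images, length and fi_step and the slicing pattern come from the diff):

def select_pose_frames(pose_images, length, fi_step):
    # length == 0, or a request longer than the clip, means "use every available frame"
    args_L = len(pose_images) if length == 0 or length > len(pose_images) else length
    # the commit comments this cap out, so long inputs are no longer truncated:
    # args_L = min(args_L, 9999)
    # keep every fi_step-th frame up to args_L; the skipped ones are presumably
    # filled back in later by the frame-interpolation step imported above
    return pose_images[:args_L:fi_step]

frames = select_pose_frames(list(range(100)), length=0, fi_step=3)
print(len(frames))  # 34 of the 100 frames are kept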
@@ -389,20 +389,20 @@ with gr.Blocks() as demo:
                 a2v_step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
 
             with gr.Row():
-                a2v_length = gr.Slider(minimum=0, maximum=
+                a2v_length = gr.Slider(minimum=0, maximum=9999, step=1, value=30, label="Length (-L) (Set to 0 to automatically calculate length)")
                 a2v_seed = gr.Number(value=42, label="Seed (--seed)")
 
             a2v_botton = gr.Button("Generate", variant="primary")
             a2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
 
-        gr.Examples(
-            examples=[
-
-
-
-
-
-        )
+        #gr.Examples(
+        #    examples=[
+        #        ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/Aragaki.png", None],
+        #        ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/solo.png", None],
+        #        ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
+        #    ],
+        #    inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video],
+        #)
 
 
     with gr.Tab("Video2video"):
@@ -417,20 +417,20 @@ with gr.Blocks() as demo:
                 v2v_step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
 
             with gr.Row():
-                v2v_length = gr.Slider(minimum=0, maximum=
+                v2v_length = gr.Slider(minimum=0, maximum=999, step=1, value=30, label="Length (-L) (Set to 0 to automatically calculate length)")
                 v2v_seed = gr.Number(value=42, label="Seed (--seed)")
 
             v2v_botton = gr.Button("Generate", variant="primary")
             v2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
 
-        gr.Examples(
-
-
-
-
-
-
-        )
+        #gr.Examples(
+        #    examples=[
+        #        ["configs/inference/ref_images/Aragaki.png", "configs/inference/video/Aragaki_song.mp4"],
+        #        ["configs/inference/ref_images/solo.png", "configs/inference/video/Aragaki_song.mp4"],
+        #        ["configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
+        #    ],
+        #    inputs=[v2v_ref_img, v2v_source_video, a2v_headpose_video],
+        #)
 
     a2v_botton.click(
         fn=audio2video,
@@ -445,4 +445,4 @@ with gr.Blocks() as demo:
         outputs=[v2v_output_video, v2v_ref_img]
     )
 
-demo.launch()
+demo.launch(share=True)
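For context on the widened length sliders and the new launch call, here is a stripped-down, hypothetical sketch of the same Gradio pattern; it is not the real AniPortrait app. The config path, slider bounds and labels are copied from the diff, while the generate stub merely stands in for the actual audio2video pipeline.

import gradio as gr
from omegaconf import OmegaConf

# Hard-coded Colab checkout path taken from the commit; it only resolves in that environment.
config = OmegaConf.load('/content/AniPortrait_official/configs/prompts/animation_audio.yaml')

def generate(length, seed):
    # Placeholder for audio2video(); it just echoes its inputs so the sketch runs standalone.
    return f"length={int(length)}, seed={int(seed)}, weight_dtype={config.weight_dtype}"

with gr.Blocks() as demo:
    with gr.Row():
        # Slider bounds as in the diff: 0 means "automatically calculate length".
        a2v_length = gr.Slider(minimum=0, maximum=9999, step=1, value=30,
                               label="Length (-L) (Set to 0 to automatically calculate length)")
        a2v_seed = gr.Number(value=42, label="Seed (--seed)")
    a2v_botton = gr.Button("Generate", variant="primary")
    result = gr.Textbox(label="Result")
    a2v_botton.click(fn=generate, inputs=[a2v_length, a2v_seed], outputs=result)

# share=True asks Gradio for a temporary public gradio.live URL, so the interface stays
# reachable from a Colab or Space runtime without direct inbound access; the UI itself
# is unchanged relative to demo.launch().
demo.launch(share=True)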