Create app.py
app.py
ADDED
@@ -0,0 +1,42 @@
import os
import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
from eval import interpolator, util
import gradio as gr


_UINT8_MAX_F = float(np.iinfo(np.uint8).max)

# Download the pretrained FILM frame-interpolation model from the Hugging Face Hub.
from huggingface_hub import snapshot_download
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")

interpolator = interpolator.Interpolator(model, None)

batch_dt = np.full(shape=(1,), fill_value=0.5, dtype=np.float32)

def predict(frame1, frame2, times_to_interpolate):
    img1 = frame1
    img2 = frame2
    # If the two inputs differ in size, crop both to their common size.
    if img1.size != img2.size:
        common_size = (min(img1.size[0], img2.size[0]), min(img1.size[1], img2.size[1]))
        img1 = img1.crop((0, 0, common_size[0], common_size[1]))
        img2 = img2.crop((0, 0, common_size[0], common_size[1]))
    frame1 = 'new_frame1.png'
    frame2 = 'new_frame2.png'
    img1.save(frame1)
    img2.save(frame2)

    input_frames = [str(frame1), str(frame2)]

    # Recursively interpolate intermediate frames between the two inputs.
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, interpolator))
    # Encode the frame sequence to a video with mediapy/ffmpeg.
    ffmpeg_path = util.get_ffmpeg_path()
    mediapy.set_ffmpeg(ffmpeg_path)
    out_path = "out.mp4"
    mediapy.write_video(str(out_path), frames, fps=30)
    return out_path

gr.Interface(
    predict,
    [gr.inputs.Image(type='pil'),
     gr.inputs.Image(type='pil'),
     gr.inputs.Slider(minimum=2, maximum=5, step=1)],
    "playable_video").launch(enable_queue=True)
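
For local debugging outside the Gradio UI, a minimal sketch of calling predict() directly is shown below. The two input filenames a.png and b.png are hypothetical placeholders, not part of the Space; the sketch assumes the model download and the eval module above have already loaded successfully.

# Hypothetical local test of predict(); a.png and b.png are placeholder images.
from PIL import Image

if __name__ == "__main__":
    first = Image.open("a.png")
    second = Image.open("b.png")
    video_path = predict(first, second, times_to_interpolate=3)
    print("Interpolated video written to", video_path)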