Inferencer committed on
Commit e04e4d5
1 Parent(s): b1e008c

Upload 66 files

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. facefusion/__init__.py +0 -0
  2. facefusion/choices.py +30 -0
  3. facefusion/common_helper.py +14 -0
  4. facefusion/config.py +80 -0
  5. facefusion/content_analyser.py +104 -0
  6. facefusion/core.py +308 -0
  7. facefusion/download.py +49 -0
  8. facefusion/execution_helper.py +37 -0
  9. facefusion/face_analyser.py +357 -0
  10. facefusion/face_helper.py +123 -0
  11. facefusion/face_masker.py +129 -0
  12. facefusion/face_store.py +48 -0
  13. facefusion/ffmpeg.py +98 -0
  14. facefusion/filesystem.py +91 -0
  15. facefusion/globals.py +55 -0
  16. facefusion/installer.py +92 -0
  17. facefusion/logger.py +47 -0
  18. facefusion/memory.py +21 -0
  19. facefusion/metadata.py +13 -0
  20. facefusion/normalizer.py +44 -0
  21. facefusion/processors/__init__.py +0 -0
  22. facefusion/processors/frame/__init__.py +0 -0
  23. facefusion/processors/frame/choices.py +13 -0
  24. facefusion/processors/frame/core.py +101 -0
  25. facefusion/processors/frame/globals.py +10 -0
  26. facefusion/processors/frame/modules/__init__.py +0 -0
  27. facefusion/processors/frame/modules/face_debugger.py +149 -0
  28. facefusion/processors/frame/modules/face_enhancer.py +263 -0
  29. facefusion/processors/frame/modules/face_swapper.py +321 -0
  30. facefusion/processors/frame/modules/frame_enhancer.py +178 -0
  31. facefusion/processors/frame/typings.py +6 -0
  32. facefusion/typing.py +58 -0
  33. facefusion/uis/__init__.py +0 -0
  34. facefusion/uis/assets/fixes.css +7 -0
  35. facefusion/uis/assets/overrides.css +44 -0
  36. facefusion/uis/choices.py +7 -0
  37. facefusion/uis/components/__init__.py +0 -0
  38. facefusion/uis/components/about.py +23 -0
  39. facefusion/uis/components/benchmark.py +137 -0
  40. facefusion/uis/components/benchmark_options.py +29 -0
  41. facefusion/uis/components/common_options.py +35 -0
  42. facefusion/uis/components/execution.py +34 -0
  43. facefusion/uis/components/execution_queue_count.py +28 -0
  44. facefusion/uis/components/execution_thread_count.py +29 -0
  45. facefusion/uis/components/face_analyser.py +98 -0
  46. facefusion/uis/components/face_masker.py +123 -0
  47. facefusion/uis/components/face_selector.py +164 -0
  48. facefusion/uis/components/frame_processors.py +40 -0
  49. facefusion/uis/components/frame_processors_options.py +141 -0
  50. facefusion/uis/components/memory.py +41 -0
facefusion/__init__.py ADDED
File without changes
facefusion/choices.py ADDED
@@ -0,0 +1,30 @@
+from typing import List
+
+from facefusion.typing import VideoMemoryStrategy, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder, OutputVideoPreset
+from facefusion.common_helper import create_int_range, create_float_range
+
+video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ]
+face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
+face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
+face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
+face_detector_models : List[str] = [ 'retinaface', 'yunet' ]
+face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
+face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ]
+face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ]
+face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ]
+temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png', 'bmp' ]
+output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
+output_video_presets : List[OutputVideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ]
+
+video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160 ]
+
+execution_thread_count_range : List[int] = create_int_range(1, 128, 1)
+execution_queue_count_range : List[int] = create_int_range(1, 32, 1)
+system_memory_limit_range : List[int] = create_int_range(0, 128, 1)
+face_detector_score_range : List[float] = create_float_range(0.0, 1.0, 0.05)
+face_mask_blur_range : List[float] = create_float_range(0.0, 1.0, 0.05)
+face_mask_padding_range : List[int] = create_int_range(0, 100, 1)
+reference_face_distance_range : List[float] = create_float_range(0.0, 1.5, 0.05)
+temp_frame_quality_range : List[int] = create_int_range(0, 100, 1)
+output_image_quality_range : List[int] = create_int_range(0, 100, 1)
+output_video_quality_range : List[int] = create_int_range(0, 100, 1)
facefusion/common_helper.py ADDED
@@ -0,0 +1,14 @@
+from typing import List, Any
+import numpy
+
+
+def create_metavar(ranges : List[Any]) -> str:
+	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
+
+
+def create_int_range(start : int, stop : int, step : int) -> List[int]:
+	return (numpy.arange(start, stop + step, step)).tolist()
+
+
+def create_float_range(start : float, stop : float, step : float) -> List[float]:
+	return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()
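A quick usage sketch (not part of the commit): both range helpers pass stop + step to numpy.arange, so the stop value itself is included, and create_metavar renders a range as the bracketed string used for argparse metavars.

    from facefusion.common_helper import create_int_range, create_float_range, create_metavar

    create_int_range(1, 4, 1)                      # [1, 2, 3, 4] - stop is inclusive
    create_float_range(0.0, 0.2, 0.05)             # [0.0, 0.05, 0.1, 0.15, 0.2]
    create_metavar(create_int_range(1, 128, 1))    # '[1-128]'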
facefusion/config.py ADDED
@@ -0,0 +1,80 @@
+from configparser import ConfigParser
+from typing import Optional, List
+
+from facefusion.filesystem import resolve_relative_path
+
+CONFIG = None
+
+
+def get_config() -> ConfigParser:
+	global CONFIG
+
+	if CONFIG is None:
+		config_path = resolve_relative_path('../facefusion.ini')
+		CONFIG = ConfigParser()
+		CONFIG.read(config_path)
+	return CONFIG
+
+
+def clear_config() -> None:
+	global CONFIG
+
+	CONFIG = None
+
+
+def get_str_value(key : str, fallback : Optional[str] = None) -> Optional[str]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option)
+	if value or fallback:
+		return str(value or fallback)
+	return None
+
+
+def get_int_value(key : str, fallback : Optional[str] = None) -> Optional[int]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option)
+	if value or fallback:
+		return int(value or fallback)
+	return None
+
+
+def get_float_value(key : str, fallback : Optional[str] = None) -> Optional[float]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option)
+	if value or fallback:
+		return float(value or fallback)
+	return None
+
+
+def get_bool_value(key : str, fallback : Optional[str] = None) -> Optional[bool]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option, fallback)
+	if value == 'True' or fallback == 'True':
+		return True
+	if value == 'False' or fallback == 'False':
+		return False
+	return None
+
+
+def get_str_list(key : str, fallback : Optional[str] = None) -> Optional[List[str]]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option)
+	if value or fallback:
+		return [ str(value) for value in (value or fallback).split(' ') ]
+	return None
+
+
+def get_int_list(key : str, fallback : Optional[str] = None) -> Optional[List[int]]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option)
+	if value or fallback:
+		return [ int(value) for value in (value or fallback).split(' ') ]
+	return None
+
+
+def get_float_list(key : str, fallback : Optional[str] = None) -> Optional[List[float]]:
+	section, option = key.split('.')
+	value = get_config()[section].get(option)
+	if value or fallback:
+		return [ float(value) for value in (value or fallback).split(' ') ]
+	return None
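A short sketch of how the dotted keys map onto facefusion.ini (the ini excerpt is hypothetical): the part before the dot selects the ConfigParser section, the part after selects the option, and fallbacks are passed as strings because they go through the same int()/float()/split() conversion as ini values.

    # [execution]                      <- hypothetical facefusion.ini excerpt
    # execution_thread_count = 8

    from facefusion import config

    config.get_int_value('execution.execution_thread_count', '4')   # 8, read from the ini
    config.get_int_value('execution.execution_queue_count', '1')    # 1, taken from the fallback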
facefusion/content_analyser.py ADDED
@@ -0,0 +1,104 @@
+from typing import Any, Dict
+from functools import lru_cache
+import threading
+import cv2
+import numpy
+import onnxruntime
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.typing import Frame, ModelValue, Fps
+from facefusion.execution_helper import apply_execution_provider_options
+from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_video_fps
+from facefusion.filesystem import resolve_relative_path
+from facefusion.download import conditional_download
+
+CONTENT_ANALYSER = None
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : Dict[str, ModelValue] =\
+{
+	'open_nsfw':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx',
+		'path': resolve_relative_path('../.assets/models/open_nsfw.onnx')
+	}
+}
+PROBABILITY_LIMIT = 0.80
+RATE_LIMIT = 5
+STREAM_COUNTER = 0
+
+
+def get_content_analyser() -> Any:
+	global CONTENT_ANALYSER
+
+	with THREAD_LOCK:
+		if CONTENT_ANALYSER is None:
+			model_path = MODELS.get('open_nsfw').get('path')
+			CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
+	return CONTENT_ANALYSER
+
+
+def clear_content_analyser() -> None:
+	global CONTENT_ANALYSER
+
+	CONTENT_ANALYSER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = MODELS.get('open_nsfw').get('url')
+		conditional_download(download_directory_path, [ model_url ])
+	return True
+
+
+def analyse_stream(frame : Frame, video_fps : Fps) -> bool:
+	global STREAM_COUNTER
+
+	STREAM_COUNTER = STREAM_COUNTER + 1
+	if STREAM_COUNTER % int(video_fps) == 0:
+		return analyse_frame(frame)
+	return False
+
+
+def prepare_frame(frame : Frame) -> Frame:
+	frame = cv2.resize(frame, (224, 224)).astype(numpy.float32)
+	frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32)
+	frame = numpy.expand_dims(frame, axis = 0)
+	return frame
+
+
+def analyse_frame(frame : Frame) -> bool:
+	content_analyser = get_content_analyser()
+	frame = prepare_frame(frame)
+	probability = content_analyser.run(None,
+	{
+		'input:0': frame
+	})[0][0][1]
+	return probability > PROBABILITY_LIMIT
+
+
+@lru_cache(maxsize = None)
+def analyse_image(image_path : str) -> bool:
+	frame = read_image(image_path)
+	return analyse_frame(frame)
+
+
+@lru_cache(maxsize = None)
+def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
+	video_frame_total = count_video_frame_total(video_path)
+	video_fps = detect_video_fps(video_path)
+	frame_range = range(start_frame or 0, end_frame or video_frame_total)
+	rate = 0.0
+	counter = 0
+	with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+		for frame_number in frame_range:
+			if frame_number % int(video_fps) == 0:
+				frame = get_video_frame(video_path, frame_number)
+				if analyse_frame(frame):
+					counter += 1
+			rate = counter * int(video_fps) / len(frame_range) * 100
+			progress.update()
+			progress.set_postfix(rate = rate)
+	return rate > RATE_LIMIT
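Worth noting about the design: analyse_stream and analyse_video only run the model on roughly one frame per second of footage (frame_number % int(video_fps)), and a video is flagged only when the flagged seconds exceed RATE_LIMIT percent of its duration. A tiny sketch (the dummy frame is illustrative) of what prepare_frame feeds the open_nsfw session:

    import numpy
    from facefusion.content_analyser import prepare_frame

    frame = numpy.zeros((480, 640, 3), dtype = numpy.uint8)   # stand-in BGR frame
    prepare_frame(frame).shape   # (1, 224, 224, 3): resized, mean-subtracted float32 batch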
facefusion/core.py ADDED
@@ -0,0 +1,308 @@
+import os
+
+os.environ['OMP_NUM_THREADS'] = '1'
+
+import signal
+import sys
+import time
+import warnings
+import shutil
+import numpy
+import onnxruntime
+from time import sleep
+from argparse import ArgumentParser, HelpFormatter
+
+import facefusion.choices
+import facefusion.globals
+from facefusion.face_analyser import get_one_face, get_average_face
+from facefusion.face_store import get_reference_faces, append_reference_face
+from facefusion import face_analyser, face_masker, content_analyser, config, metadata, logger, wording
+from facefusion.content_analyser import analyse_image, analyse_video
+from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
+from facefusion.common_helper import create_metavar
+from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
+from facefusion.normalizer import normalize_output_path, normalize_padding, normalize_fps
+from facefusion.memory import limit_system_memory
+from facefusion.filesystem import list_directory, get_temp_frame_paths, create_temp, move_temp, clear_temp, is_image, is_video
+from facefusion.ffmpeg import extract_frames, compress_image, merge_video, restore_audio
+from facefusion.vision import get_video_frame, read_image, read_static_images, pack_resolution, detect_video_resolution, detect_video_fps, create_video_resolutions
+
+onnxruntime.set_default_logger_severity(3)
+warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
+warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
+
+
+def cli() -> None:
+	signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
+	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
+	# general
+	program.add_argument('-s', '--source', help = wording.get('source_help'), action = 'append', dest = 'source_paths', default = config.get_str_list('general.source_paths'))
+	program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path', default = config.get_str_value('general.target_path'))
+	program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path', default = config.get_str_value('general.output_path'))
+	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
+	# misc
+	group_misc = program.add_argument_group('misc')
+	group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true', default = config.get_bool_value('misc.skip_download'))
+	group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true', default = config.get_bool_value('misc.headless'))
+	group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = config.get_str_value('misc.log_level', 'info'), choices = logger.get_log_levels())
+	# execution
+	execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
+	group_execution = program.add_argument_group('execution')
+	group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
+	group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
+	group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
+	# memory
+	group_memory = program.add_argument_group('memory')
+	group_memory.add_argument('--video-memory-strategy', help = wording.get('video_memory_strategy_help'), default = config.get_str_value('memory.video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies)
+	group_memory.add_argument('--system-memory-limit', help = wording.get('system_memory_limit_help'), type = int, default = config.get_int_value('memory.system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_metavar(facefusion.choices.system_memory_limit_range))
+	# face analyser
+	group_face_analyser = program.add_argument_group('face analyser')
+	group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = config.get_str_value('face_analyser.face_analyser_order', 'left-right'), choices = facefusion.choices.face_analyser_orders)
+	group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), default = config.get_str_value('face_analyser.face_analyser_age'), choices = facefusion.choices.face_analyser_ages)
+	group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), default = config.get_str_value('face_analyser.face_analyser_gender'), choices = facefusion.choices.face_analyser_genders)
+	group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = config.get_str_value('face_analyser.face_detector_model', 'retinaface'), choices = facefusion.choices.face_detector_models)
+	group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = config.get_str_value('face_analyser.face_detector_size', '640x640'), choices = facefusion.choices.face_detector_sizes)
+	group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = config.get_float_value('face_analyser.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
+	# face selector
+	group_face_selector = program.add_argument_group('face selector')
+	group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = config.get_str_value('face_selector.face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes)
+	group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = config.get_int_value('face_selector.reference_face_position', '0'))
+	group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = config.get_float_value('face_selector.reference_face_distance', '0.6'), choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
+	group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = config.get_int_value('face_selector.reference_frame_number', '0'))
+	# face mask
+	group_face_mask = program.add_argument_group('face mask')
+	group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_mask.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
+	group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = config.get_float_value('face_mask.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
+	group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = config.get_int_list('face_mask.face_mask_padding', '0 0 0 0'), nargs = '+')
+	group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_mask.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
+	# frame extraction
+	group_frame_extraction = program.add_argument_group('frame extraction')
+	group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_start'))
+	group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_end'))
+	group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = config.get_str_value('frame_extraction.temp_frame_format', 'jpg'), choices = facefusion.choices.temp_frame_formats)
+	group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = config.get_int_value('frame_extraction.temp_frame_quality', '100'), choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
+	group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), action = 'store_true', default = config.get_bool_value('frame_extraction.keep_temp'))
+	# output creation
+	group_output_creation = program.add_argument_group('output creation')
+	group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = config.get_int_value('output_creation.output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
+	group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = config.get_str_value('output_creation.output_video_encoder', 'libx264'), choices = facefusion.choices.output_video_encoders)
+	group_output_creation.add_argument('--output-video-preset', help = wording.get('output_video_preset_help'), default = config.get_str_value('output_creation.output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets)
+	group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = config.get_int_value('output_creation.output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
+	group_output_creation.add_argument('--output-video-resolution', help = wording.get('output_video_resolution_help'), default = config.get_str_value('output_creation.output_video_resolution'))
+	group_output_creation.add_argument('--output-video-fps', help = wording.get('output_video_fps_help'), type = float)
+	group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true', default = config.get_bool_value('output_creation.skip_audio'))
+	# frame processors
+	available_frame_processors = list_directory('facefusion/processors/frame/modules')
+	program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
+	group_frame_processors = program.add_argument_group('frame processors')
+	group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = config.get_str_list('frame_processors.frame_processors', 'face_swapper'), nargs = '+')
+	for frame_processor in available_frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		frame_processor_module.register_args(group_frame_processors)
+	# uis
+	available_ui_layouts = list_directory('facefusion/uis/layouts')
+	group_uis = program.add_argument_group('uis')
+	group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layout', 'default'), nargs = '+')
+	run(program)
+
+
+def apply_args(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	# general
+	facefusion.globals.source_paths = args.source_paths
+	facefusion.globals.target_path = args.target_path
+	facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, args.output_path)
+	# misc
+	facefusion.globals.skip_download = args.skip_download
+	facefusion.globals.headless = args.headless
+	facefusion.globals.log_level = args.log_level
+	# execution
+	facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
+	facefusion.globals.execution_thread_count = args.execution_thread_count
+	facefusion.globals.execution_queue_count = args.execution_queue_count
+	# memory
+	facefusion.globals.video_memory_strategy = args.video_memory_strategy
+	facefusion.globals.system_memory_limit = args.system_memory_limit
+	# face analyser
+	facefusion.globals.face_analyser_order = args.face_analyser_order
+	facefusion.globals.face_analyser_age = args.face_analyser_age
+	facefusion.globals.face_analyser_gender = args.face_analyser_gender
+	facefusion.globals.face_detector_model = args.face_detector_model
+	facefusion.globals.face_detector_size = args.face_detector_size
+	facefusion.globals.face_detector_score = args.face_detector_score
+	# face selector
+	facefusion.globals.face_selector_mode = args.face_selector_mode
+	facefusion.globals.reference_face_position = args.reference_face_position
+	facefusion.globals.reference_face_distance = args.reference_face_distance
+	facefusion.globals.reference_frame_number = args.reference_frame_number
+	# face mask
+	facefusion.globals.face_mask_types = args.face_mask_types
+	facefusion.globals.face_mask_blur = args.face_mask_blur
+	facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
+	facefusion.globals.face_mask_regions = args.face_mask_regions
+	# frame extraction
+	facefusion.globals.trim_frame_start = args.trim_frame_start
+	facefusion.globals.trim_frame_end = args.trim_frame_end
+	facefusion.globals.temp_frame_format = args.temp_frame_format
+	facefusion.globals.temp_frame_quality = args.temp_frame_quality
+	facefusion.globals.keep_temp = args.keep_temp
+	# output creation
+	facefusion.globals.output_image_quality = args.output_image_quality
+	facefusion.globals.output_video_encoder = args.output_video_encoder
+	facefusion.globals.output_video_preset = args.output_video_preset
+	facefusion.globals.output_video_quality = args.output_video_quality
+	if is_video(args.target_path):
+		target_video_resolutions = create_video_resolutions(args.target_path)
+		if args.output_video_resolution in target_video_resolutions:
+			facefusion.globals.output_video_resolution = args.output_video_resolution
+		else:
+			target_video_resolution = detect_video_resolution(args.target_path)
+			facefusion.globals.output_video_resolution = pack_resolution(target_video_resolution)
+	if args.output_video_fps or is_video(args.target_path):
+		facefusion.globals.output_video_fps = normalize_fps(args.output_video_fps) or detect_video_fps(args.target_path)
+	facefusion.globals.skip_audio = args.skip_audio
+	# frame processors
+	available_frame_processors = list_directory('facefusion/processors/frame/modules')
+	facefusion.globals.frame_processors = args.frame_processors
+	for frame_processor in available_frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		frame_processor_module.apply_args(program)
+	# uis
+	facefusion.globals.ui_layouts = args.ui_layouts
+
+
+def run(program : ArgumentParser) -> None:
+	apply_args(program)
+	logger.init(facefusion.globals.log_level)
+	if facefusion.globals.system_memory_limit > 0:
+		limit_system_memory(facefusion.globals.system_memory_limit)
+	if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check():
+		return
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		if not frame_processor_module.pre_check():
+			return
+	if facefusion.globals.headless:
+		conditional_process()
+	else:
+		import facefusion.uis.core as ui
+
+		for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts):
+			if not ui_layout.pre_check():
+				return
+		ui.launch()
+
+
+def destroy() -> None:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	sys.exit(0)
+
+
+def pre_check() -> bool:
+	if sys.version_info < (3, 9):
+		logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
+		return False
+	if not shutil.which('ffmpeg'):
+		logger.error(wording.get('ffmpeg_not_installed'), __name__.upper())
+		return False
+	return True
+
+
+def conditional_process() -> None:
+	start_time = time.time()
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		while not frame_processor_module.post_check():
+			logger.disable()
+			sleep(0.5)
+		logger.enable()
+		if not frame_processor_module.pre_process('output'):
+			return
+	conditional_append_reference_faces()
+	if is_image(facefusion.globals.target_path):
+		process_image(start_time)
+	if is_video(facefusion.globals.target_path):
+		process_video(start_time)
+
+
+def conditional_append_reference_faces() -> None:
+	if 'reference' in facefusion.globals.face_selector_mode and not get_reference_faces():
+		source_frames = read_static_images(facefusion.globals.source_paths)
+		source_face = get_average_face(source_frames)
+		if is_video(facefusion.globals.target_path):
+			reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		else:
+			reference_frame = read_image(facefusion.globals.target_path)
+		reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+		append_reference_face('origin', reference_face)
+		if source_face and reference_face:
+			for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+				abstract_reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame)
+				if numpy.any(abstract_reference_frame):
+					reference_frame = abstract_reference_frame
+					reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+					append_reference_face(frame_processor_module.__name__, reference_face)
+
+
+def process_image(start_time : float) -> None:
+	if analyse_image(facefusion.globals.target_path):
+		return
+	shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+	# process frame
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		logger.info(wording.get('processing'), frame_processor_module.NAME)
+		frame_processor_module.process_image(facefusion.globals.source_paths, facefusion.globals.output_path, facefusion.globals.output_path)
+		frame_processor_module.post_process()
+	# compress image
+	logger.info(wording.get('compressing_image'), __name__.upper())
+	if not compress_image(facefusion.globals.output_path):
+		logger.error(wording.get('compressing_image_failed'), __name__.upper())
+	# validate image
+	if is_image(facefusion.globals.output_path):
+		seconds = '{:.2f}'.format((time.time() - start_time) % 60)
+		logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__.upper())
+	else:
+		logger.error(wording.get('processing_image_failed'), __name__.upper())
+
+
+def process_video(start_time : float) -> None:
+	if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end):
+		return
+	# create temp
+	logger.info(wording.get('creating_temp'), __name__.upper())
+	create_temp(facefusion.globals.target_path)
+	# extract frames
+	logger.info(wording.get('extracting_frames_fps').format(video_fps = facefusion.globals.output_video_fps), __name__.upper())
+	extract_frames(facefusion.globals.target_path, facefusion.globals.output_video_resolution, facefusion.globals.output_video_fps)
+	# process frame
+	temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+	if temp_frame_paths:
+		for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+			logger.info(wording.get('processing'), frame_processor_module.NAME)
+			frame_processor_module.process_video(facefusion.globals.source_paths, temp_frame_paths)
+			frame_processor_module.post_process()
+	else:
+		logger.error(wording.get('temp_frames_not_found'), __name__.upper())
+		return
+	# merge video
+	logger.info(wording.get('merging_video_fps').format(video_fps = facefusion.globals.output_video_fps), __name__.upper())
+	if not merge_video(facefusion.globals.target_path, facefusion.globals.output_video_fps):
+		logger.error(wording.get('merging_video_failed'), __name__.upper())
+		return
+	# handle audio
+	if facefusion.globals.skip_audio:
+		logger.info(wording.get('skipping_audio'), __name__.upper())
+		move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+	else:
+		logger.info(wording.get('restoring_audio'), __name__.upper())
+		if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path, facefusion.globals.output_video_fps):
+			logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
+			move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+	# clear temp
+	logger.info(wording.get('clearing_temp'), __name__.upper())
+	clear_temp(facefusion.globals.target_path)
+	# validate video
+	if is_video(facefusion.globals.output_path):
+		seconds = '{:.2f}'.format((time.time() - start_time))
+		logger.info(wording.get('processing_video_succeed').format(seconds = seconds), __name__.upper())
+	else:
+		logger.error(wording.get('processing_video_failed'), __name__.upper())
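For orientation (the run.py entry-script name is an assumption, not part of this diff): cli() builds the parser, run() applies the arguments and runs the pre-checks, and --headless skips the Gradio UI entirely, so a batch job reduces to:

    # hypothetical entry script that hands control to facefusion.core
    from facefusion import core

    if __name__ == '__main__':
        core.cli()

    # shell usage with the flags registered in cli() above:
    # python run.py --headless -s source.jpg -t target.mp4 -o output.mp4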
facefusion/download.py ADDED
@@ -0,0 +1,49 @@
+import os
+import subprocess
+import platform
+import ssl
+import urllib.request
+from typing import List
+from concurrent.futures import ThreadPoolExecutor
+from functools import lru_cache
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.filesystem import is_file
+
+if platform.system().lower() == 'darwin':
+	ssl._create_default_https_context = ssl._create_unverified_context
+
+
+def conditional_download(download_directory_path : str, urls : List[str]) -> None:
+	with ThreadPoolExecutor() as executor:
+		for url in urls:
+			executor.submit(get_download_size, url)
+	for url in urls:
+		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
+		initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
+		total = get_download_size(url)
+		if initial < total:
+			with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+				subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
+				current = initial
+				while current < total:
+					if is_file(download_file_path):
+						current = os.path.getsize(download_file_path)
+						progress.update(current - progress.n)
+
+
+@lru_cache(maxsize = None)
+def get_download_size(url : str) -> int:
+	try:
+		response = urllib.request.urlopen(url, timeout = 10)
+		return int(response.getheader('Content-Length'))
+	except (OSError, ValueError):
+		return 0
+
+
+def is_download_done(url : str, file_path : str) -> bool:
+	if is_file(file_path):
+		return get_download_size(url) == os.path.getsize(file_path)
+	return False
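A hedged usage sketch (the URL is the one used elsewhere in this commit): the transfer is delegated to curl with --continue-at -, so partial files resume, and progress is reported by polling the growing file size against the Content-Length fetched by get_download_size.

    from facefusion.download import conditional_download, is_download_done

    url = 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx'
    conditional_download('.assets/models', [ url ])
    is_download_done(url, '.assets/models/open_nsfw.onnx')   # True once the size matches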
facefusion/execution_helper.py ADDED
@@ -0,0 +1,37 @@
+from typing import Any, List
+import onnxruntime
+
+
+def encode_execution_providers(execution_providers : List[str]) -> List[str]:
+	return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ]
+
+
+def decode_execution_providers(execution_providers : List[str]) -> List[str]:
+	available_execution_providers = onnxruntime.get_available_providers()
+	encoded_execution_providers = encode_execution_providers(available_execution_providers)
+
+	return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ]
+
+
+def apply_execution_provider_options(execution_providers : List[str]) -> List[Any]:
+	execution_providers_with_options : List[Any] = []
+
+	for execution_provider in execution_providers:
+		if execution_provider == 'CUDAExecutionProvider':
+			execution_providers_with_options.append((execution_provider,
+			{
+				'cudnn_conv_algo_search': 'DEFAULT'
+			}))
+		else:
+			execution_providers_with_options.append(execution_provider)
+	return execution_providers_with_options
+
+
+def map_torch_backend(execution_providers : List[str]) -> str:
+	if 'CoreMLExecutionProvider' in execution_providers:
+		return 'mps'
+	if 'CUDAExecutionProvider' in execution_providers or 'ROCMExecutionProvider' in execution_providers:
+		return 'cuda'
+	if 'OpenVINOExecutionProvider' in execution_providers:
+		return 'mkl'
+	return 'cpu'
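A quick round trip (results depend on which providers the installed onnxruntime actually exposes): encode strips the ExecutionProvider suffix for CLI-friendly names, decode maps those names back to the real provider identifiers.

    from facefusion.execution_helper import encode_execution_providers, decode_execution_providers

    encode_execution_providers([ 'CUDAExecutionProvider', 'CPUExecutionProvider' ])   # ['cuda', 'cpu']
    decode_execution_providers([ 'cpu' ])   # ['CPUExecutionProvider'], if available at runtime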
facefusion/face_analyser.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Optional, List, Tuple
2
+ import threading
3
+ import cv2
4
+ import numpy
5
+ import onnxruntime
6
+
7
+ import facefusion.globals
8
+ from facefusion.download import conditional_download
9
+ from facefusion.face_store import get_static_faces, set_static_faces
10
+ from facefusion.execution_helper import apply_execution_provider_options
11
+ from facefusion.face_helper import warp_face_by_kps, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
12
+ from facefusion.filesystem import resolve_relative_path
13
+ from facefusion.typing import Frame, Face, FaceSet, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelSet, Bbox, Kps, Score, Embedding
14
+ from facefusion.vision import resize_frame_resolution, unpack_resolution
15
+
16
+ FACE_ANALYSER = None
17
+ THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
18
+ THREAD_LOCK : threading.Lock = threading.Lock()
19
+ MODELS : ModelSet =\
20
+ {
21
+ 'face_detector_retinaface':
22
+ {
23
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/retinaface_10g.onnx',
24
+ 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
25
+ },
26
+ 'face_detector_yunet':
27
+ {
28
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx',
29
+ 'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
30
+ },
31
+ 'face_recognizer_arcface_blendswap':
32
+ {
33
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
34
+ 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
35
+ },
36
+ 'face_recognizer_arcface_inswapper':
37
+ {
38
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
39
+ 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
40
+ },
41
+ 'face_recognizer_arcface_simswap':
42
+ {
43
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_simswap.onnx',
44
+ 'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx')
45
+ },
46
+ 'gender_age':
47
+ {
48
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gender_age.onnx',
49
+ 'path': resolve_relative_path('../.assets/models/gender_age.onnx')
50
+ }
51
+ }
52
+
53
+
54
+ def get_face_analyser() -> Any:
55
+ global FACE_ANALYSER
56
+
57
+ with THREAD_LOCK:
58
+ if FACE_ANALYSER is None:
59
+ if facefusion.globals.face_detector_model == 'retinaface':
60
+ face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
61
+ if facefusion.globals.face_detector_model == 'yunet':
62
+ face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
63
+ if facefusion.globals.face_recognizer_model == 'arcface_blendswap':
64
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
65
+ if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
66
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
67
+ if facefusion.globals.face_recognizer_model == 'arcface_simswap':
68
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
69
+ gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
70
+ FACE_ANALYSER =\
71
+ {
72
+ 'face_detector': face_detector,
73
+ 'face_recognizer': face_recognizer,
74
+ 'gender_age': gender_age
75
+ }
76
+ return FACE_ANALYSER
77
+
78
+
79
+ def clear_face_analyser() -> Any:
80
+ global FACE_ANALYSER
81
+
82
+ FACE_ANALYSER = None
83
+
84
+
85
+ def pre_check() -> bool:
86
+ if not facefusion.globals.skip_download:
87
+ download_directory_path = resolve_relative_path('../.assets/models')
88
+ model_urls =\
89
+ [
90
+ MODELS.get('face_detector_retinaface').get('url'),
91
+ MODELS.get('face_detector_yunet').get('url'),
92
+ MODELS.get('face_recognizer_arcface_inswapper').get('url'),
93
+ MODELS.get('face_recognizer_arcface_simswap').get('url'),
94
+ MODELS.get('gender_age').get('url')
95
+ ]
96
+ conditional_download(download_directory_path, model_urls)
97
+ return True
98
+
99
+
100
+ def extract_faces(frame : Frame) -> List[Face]:
101
+ face_detector_width, face_detector_height = unpack_resolution(facefusion.globals.face_detector_size)
102
+ frame_height, frame_width, _ = frame.shape
103
+ temp_frame = resize_frame_resolution(frame, face_detector_width, face_detector_height)
104
+ temp_frame_height, temp_frame_width, _ = temp_frame.shape
105
+ ratio_height = frame_height / temp_frame_height
106
+ ratio_width = frame_width / temp_frame_width
107
+ if facefusion.globals.face_detector_model == 'retinaface':
108
+ bbox_list, kps_list, score_list = detect_with_retinaface(temp_frame, temp_frame_height, temp_frame_width, face_detector_height, face_detector_width, ratio_height, ratio_width)
109
+ return create_faces(frame, bbox_list, kps_list, score_list)
110
+ elif facefusion.globals.face_detector_model == 'yunet':
111
+ bbox_list, kps_list, score_list = detect_with_yunet(temp_frame, temp_frame_height, temp_frame_width, ratio_height, ratio_width)
112
+ return create_faces(frame, bbox_list, kps_list, score_list)
113
+ return []
114
+
115
+
116
+ def detect_with_retinaface(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, face_detector_height : int, face_detector_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
117
+ face_detector = get_face_analyser().get('face_detector')
118
+ bbox_list = []
119
+ kps_list = []
120
+ score_list = []
121
+ feature_strides = [ 8, 16, 32 ]
122
+ feature_map_channel = 3
123
+ anchor_total = 2
124
+ prepare_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
125
+ prepare_frame[:temp_frame_height, :temp_frame_width, :] = temp_frame
126
+ temp_frame = (prepare_frame - 127.5) / 128.0
127
+ temp_frame = numpy.expand_dims(temp_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
128
+ with THREAD_SEMAPHORE:
129
+ detections = face_detector.run(None,
130
+ {
131
+ face_detector.get_inputs()[0].name: temp_frame
132
+ })
133
+ for index, feature_stride in enumerate(feature_strides):
134
+ keep_indices = numpy.where(detections[index] >= facefusion.globals.face_detector_score)[0]
135
+ if keep_indices.any():
136
+ stride_height = face_detector_height // feature_stride
137
+ stride_width = face_detector_width // feature_stride
138
+ anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
139
+ bbox_raw = detections[index + feature_map_channel] * feature_stride
140
+ kps_raw = detections[index + feature_map_channel * 2] * feature_stride
141
+ for bbox in distance_to_bbox(anchors, bbox_raw)[keep_indices]:
142
+ bbox_list.append(numpy.array(
143
+ [
144
+ bbox[0] * ratio_width,
145
+ bbox[1] * ratio_height,
146
+ bbox[2] * ratio_width,
147
+ bbox[3] * ratio_height
148
+ ]))
149
+ for kps in distance_to_kps(anchors, kps_raw)[keep_indices]:
150
+ kps_list.append(kps * [ ratio_width, ratio_height ])
151
+ for score in detections[index][keep_indices]:
152
+ score_list.append(score[0])
153
+ return bbox_list, kps_list, score_list
154
+
155
+
156
+ def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
157
+ face_detector = get_face_analyser().get('face_detector')
158
+ face_detector.setInputSize((temp_frame_width, temp_frame_height))
159
+ face_detector.setScoreThreshold(facefusion.globals.face_detector_score)
160
+ bbox_list = []
161
+ kps_list = []
162
+ score_list = []
163
+ with THREAD_SEMAPHORE:
164
+ _, detections = face_detector.detect(temp_frame)
165
+ if detections.any():
166
+ for detection in detections:
167
+ bbox_list.append(numpy.array(
168
+ [
169
+ detection[0] * ratio_width,
170
+ detection[1] * ratio_height,
171
+ (detection[0] + detection[2]) * ratio_width,
172
+ (detection[1] + detection[3]) * ratio_height
173
+ ]))
174
+ kps_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height])
175
+ score_list.append(detection[14])
176
+ return bbox_list, kps_list, score_list
177
+
178
+
179
+ def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face]:
180
+ faces = []
181
+ if facefusion.globals.face_detector_score > 0:
182
+ sort_indices = numpy.argsort(-numpy.array(score_list))
183
+ bbox_list = [ bbox_list[index] for index in sort_indices ]
184
+ kps_list = [ kps_list[index] for index in sort_indices ]
185
+ score_list = [ score_list[index] for index in sort_indices ]
186
+ keep_indices = apply_nms(bbox_list, 0.4)
187
+ for index in keep_indices:
188
+ bbox = bbox_list[index]
189
+ kps = kps_list[index]
190
+ score = score_list[index]
191
+ embedding, normed_embedding = calc_embedding(frame, kps)
192
+ gender, age = detect_gender_age(frame, bbox)
193
+ faces.append(Face(
194
+ bbox = bbox,
195
+ kps = kps,
196
+ score = score,
197
+ embedding = embedding,
198
+ normed_embedding = normed_embedding,
199
+ gender = gender,
200
+ age = age
201
+ ))
202
+ return faces
203
+
204
+
205
+ def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
206
+ face_recognizer = get_face_analyser().get('face_recognizer')
207
+ crop_frame, matrix = warp_face_by_kps(temp_frame, kps, 'arcface_112_v2', (112, 112))
208
+ crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
209
+ crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
210
+ crop_frame = numpy.expand_dims(crop_frame, axis = 0)
211
+ embedding = face_recognizer.run(None,
212
+ {
213
+ face_recognizer.get_inputs()[0].name: crop_frame
214
+ })[0]
215
+ embedding = embedding.ravel()
216
+ normed_embedding = embedding / numpy.linalg.norm(embedding)
217
+ return embedding, normed_embedding
218
+
219
+
220
+ def detect_gender_age(frame : Frame, bbox : Bbox) -> Tuple[int, int]:
221
+ gender_age = get_face_analyser().get('gender_age')
222
+ bbox = bbox.reshape(2, -1)
223
+ scale = 64 / numpy.subtract(*bbox[::-1]).max()
224
+ translation = 48 - bbox.sum(axis = 0) * 0.5 * scale
225
+ affine_matrix = numpy.array([[ scale, 0, translation[0] ], [ 0, scale, translation[1] ]])
226
+ crop_frame = cv2.warpAffine(frame, affine_matrix, (96, 96))
227
+ crop_frame = crop_frame.astype(numpy.float32)[:, :, ::-1].transpose(2, 0, 1)
228
+ crop_frame = numpy.expand_dims(crop_frame, axis = 0)
229
+ prediction = gender_age.run(None,
230
+ {
231
+ gender_age.get_inputs()[0].name: crop_frame
232
+ })[0][0]
233
+ gender = int(numpy.argmax(prediction[:2]))
234
+ age = int(numpy.round(prediction[2] * 100))
235
+ return gender, age
236
+
237
+
238
+ def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
239
+ many_faces = get_many_faces(frame)
240
+ if many_faces:
241
+ try:
242
+ return many_faces[position]
243
+ except IndexError:
244
+ return many_faces[-1]
245
+ return None
246
+
247
+
248
+ def get_average_face(frames : List[Frame], position : int = 0) -> Optional[Face]:
249
+ average_face = None
250
+ faces = []
251
+ embedding_list = []
252
+ normed_embedding_list = []
253
+ for frame in frames:
254
+ face = get_one_face(frame, position)
255
+ if face:
256
+ faces.append(face)
257
+ embedding_list.append(face.embedding)
258
+ normed_embedding_list.append(face.normed_embedding)
	if faces:
		average_face = Face(
			bbox = faces[0].bbox,
			kps = faces[0].kps,
			score = faces[0].score,
			embedding = numpy.mean(embedding_list, axis = 0),
			normed_embedding = numpy.mean(normed_embedding_list, axis = 0),
			gender = faces[0].gender,
			age = faces[0].age
		)
	return average_face


def get_many_faces(frame : Frame) -> List[Face]:
	try:
		faces_cache = get_static_faces(frame)
		if faces_cache:
			faces = faces_cache
		else:
			faces = extract_faces(frame)
			set_static_faces(frame, faces)
		if facefusion.globals.face_analyser_order:
			faces = sort_by_order(faces, facefusion.globals.face_analyser_order)
		if facefusion.globals.face_analyser_age:
			faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
		if facefusion.globals.face_analyser_gender:
			faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
		return faces
	except (AttributeError, ValueError):
		return []


def find_similar_faces(frame : Frame, reference_faces : FaceSet, face_distance : float) -> List[Face]:
	similar_faces : List[Face] = []
	many_faces = get_many_faces(frame)

	if reference_faces:
		for reference_set in reference_faces:
			if not similar_faces:
				for reference_face in reference_faces[reference_set]:
					for face in many_faces:
						if compare_faces(face, reference_face, face_distance):
							similar_faces.append(face)
	return similar_faces


def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
	current_face_distance = calc_face_distance(face, reference_face)
	return current_face_distance < face_distance


def calc_face_distance(face : Face, reference_face : Face) -> float:
	if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
		return 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
	return 0


def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
	if order == 'left-right':
		return sorted(faces, key = lambda face: face.bbox[0])
	if order == 'right-left':
		return sorted(faces, key = lambda face: face.bbox[0], reverse = True)
	if order == 'top-bottom':
		return sorted(faces, key = lambda face: face.bbox[1])
	if order == 'bottom-top':
		return sorted(faces, key = lambda face: face.bbox[1], reverse = True)
	if order == 'small-large':
		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]))
	if order == 'large-small':
		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]), reverse = True)
	if order == 'best-worst':
		return sorted(faces, key = lambda face: face.score, reverse = True)
	if order == 'worst-best':
		return sorted(faces, key = lambda face: face.score)
	return faces


def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
	filter_faces = []
	for face in faces:
		if face.age < 13 and age == 'child':
			filter_faces.append(face)
		elif face.age < 19 and age == 'teen':
			filter_faces.append(face)
		elif face.age < 60 and age == 'adult':
			filter_faces.append(face)
		elif face.age > 59 and age == 'senior':
			filter_faces.append(face)
	return filter_faces


def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
	filter_faces = []
	for face in faces:
		if face.gender == 0 and gender == 'female':
			filter_faces.append(face)
		if face.gender == 1 and gender == 'male':
			filter_faces.append(face)
	return filter_faces
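
# Illustrative sketch, not part of the upstream module: with L2-normalised
# embeddings, calc_face_distance above is the cosine distance 1 - a.b, so 0
# means an identical identity, and compare_faces merely thresholds that value
# against the user-facing reference_face_distance. The vectors below are
# hypothetical stand-ins for Face.normed_embedding.
import numpy

embedding_a = numpy.random.rand(512).astype(numpy.float32)
embedding_a /= numpy.linalg.norm(embedding_a)
embedding_b = numpy.random.rand(512).astype(numpy.float32)
embedding_b /= numpy.linalg.norm(embedding_b)
cosine_distance = 1 - numpy.dot(embedding_a, embedding_b)
print(cosine_distance < 0.6)  # True would let find_similar_faces keep the face
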
facefusion/face_helper.py ADDED
@@ -0,0 +1,123 @@
from typing import Any, Dict, Tuple, List
from cv2.typing import Size
from functools import lru_cache
import cv2
import numpy

from facefusion.typing import Bbox, Kps, Frame, Mask, Matrix, Template

# five point landmark templates in normalised [0, 1] coordinates, multiplied by the crop size at warp time
TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
{
	'arcface_112_v1': numpy.array(
	[
		[ 0.35473214, 0.45658929 ],
		[ 0.64526786, 0.45658929 ],
		[ 0.50000000, 0.61154464 ],
		[ 0.37913393, 0.77687500 ],
		[ 0.62086607, 0.77687500 ]
	]),
	'arcface_112_v2': numpy.array(
	[
		[ 0.34191607, 0.46157411 ],
		[ 0.65653393, 0.45983393 ],
		[ 0.50022500, 0.64050536 ],
		[ 0.37097589, 0.82469196 ],
		[ 0.63151696, 0.82325089 ]
	]),
	'arcface_128_v2': numpy.array(
	[
		[ 0.36167656, 0.40387734 ],
		[ 0.63696719, 0.40235469 ],
		[ 0.50019687, 0.56044219 ],
		[ 0.38710391, 0.72160547 ],
		[ 0.61507734, 0.72034453 ]
	]),
	'ffhq_512': numpy.array(
	[
		[ 0.37691676, 0.46864664 ],
		[ 0.62285697, 0.46912813 ],
		[ 0.50123859, 0.61331904 ],
		[ 0.39308822, 0.72541100 ],
		[ 0.61150205, 0.72490465 ]
	])
}


def warp_face_by_kps(temp_frame : Frame, kps : Kps, template : Template, crop_size : Size) -> Tuple[Frame, Matrix]:
	normed_template = TEMPLATES.get(template) * crop_size
	affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, crop_size, borderMode = cv2.BORDER_REPLICATE, flags = cv2.INTER_AREA)
	return crop_frame, affine_matrix


def warp_face_by_bbox(temp_frame : Frame, bbox : Bbox, crop_size : Size) -> Tuple[Frame, Matrix]:
	source_kps = numpy.array([ [ bbox[0], bbox[1] ], [ bbox[2], bbox[1] ], [ bbox[0], bbox[3] ] ], dtype = numpy.float32)
	target_kps = numpy.array([ [ 0, 0 ], [ crop_size[0], 0 ], [ 0, crop_size[1] ] ], dtype = numpy.float32)
	affine_matrix = cv2.getAffineTransform(source_kps, target_kps)
	if bbox[2] - bbox[0] > crop_size[0] or bbox[3] - bbox[1] > crop_size[1]:
		interpolation_method = cv2.INTER_AREA
	else:
		interpolation_method = cv2.INTER_LINEAR
	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, crop_size, flags = interpolation_method)
	return crop_frame, affine_matrix


def paste_back(temp_frame : Frame, crop_frame : Frame, crop_mask : Mask, affine_matrix : Matrix) -> Frame:
	inverse_matrix = cv2.invertAffineTransform(affine_matrix)
	temp_frame_size = temp_frame.shape[:2][::-1]
	inverse_crop_mask = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size).clip(0, 1)
	inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
	paste_frame = temp_frame.copy()
	paste_frame[:, :, 0] = inverse_crop_mask * inverse_crop_frame[:, :, 0] + (1 - inverse_crop_mask) * temp_frame[:, :, 0]
	paste_frame[:, :, 1] = inverse_crop_mask * inverse_crop_frame[:, :, 1] + (1 - inverse_crop_mask) * temp_frame[:, :, 1]
	paste_frame[:, :, 2] = inverse_crop_mask * inverse_crop_frame[:, :, 2] + (1 - inverse_crop_mask) * temp_frame[:, :, 2]
	return paste_frame


@lru_cache(maxsize = None)
def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
	y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
	anchors = numpy.stack((y, x), axis = -1)
	anchors = (anchors * feature_stride).reshape((-1, 2))
	anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2))
	return anchors


def distance_to_bbox(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Bbox:
	x1 = points[:, 0] - distance[:, 0]
	y1 = points[:, 1] - distance[:, 1]
	x2 = points[:, 0] + distance[:, 2]
	y2 = points[:, 1] + distance[:, 3]
	bbox = numpy.column_stack([ x1, y1, x2, y2 ])
	return bbox


def distance_to_kps(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Kps:
	x = points[:, 0::2] + distance[:, 0::2]
	y = points[:, 1::2] + distance[:, 1::2]
	kps = numpy.stack((x, y), axis = -1)
	return kps


def apply_nms(bbox_list : List[Bbox], iou_threshold : float) -> List[int]:
	# greedy non-maximum suppression: keep the best ranked box, then drop every remaining box whose IoU exceeds the threshold
	keep_indices = []
	dimension_list = numpy.reshape(bbox_list, (-1, 4))
	x1 = dimension_list[:, 0]
	y1 = dimension_list[:, 1]
	x2 = dimension_list[:, 2]
	y2 = dimension_list[:, 3]
	areas = (x2 - x1 + 1) * (y2 - y1 + 1)
	indices = numpy.arange(len(bbox_list))
	while indices.size > 0:
		index = indices[0]
		remain_indices = indices[1:]
		keep_indices.append(index)
		xx1 = numpy.maximum(x1[index], x1[remain_indices])
		yy1 = numpy.maximum(y1[index], y1[remain_indices])
		xx2 = numpy.minimum(x2[index], x2[remain_indices])
		yy2 = numpy.minimum(y2[index], y2[remain_indices])
		width = numpy.maximum(0, xx2 - xx1 + 1)
		height = numpy.maximum(0, yy2 - yy1 + 1)
		iou = width * height / (areas[index] + areas[remain_indices] - width * height)
		indices = indices[numpy.where(iou <= iou_threshold)[0] + 1]
	return keep_indices
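
# Hypothetical round trip, assuming the facefusion package is importable: crop
# a face region with warp_face_by_kps, leave it untouched, and blend it back
# with paste_back under an all-ones mask. The frame and landmarks are synthetic
# placeholders, not output of the face analyser.
import numpy
from facefusion.face_helper import warp_face_by_kps, paste_back

frame = numpy.zeros((720, 1280, 3), dtype = numpy.uint8)
kps = numpy.array([ [ 600, 300 ], [ 680, 300 ], [ 640, 350 ], [ 610, 400 ], [ 670, 400 ] ], dtype = numpy.float32)
crop_frame, affine_matrix = warp_face_by_kps(frame, kps, 'arcface_128_v2', (128, 128))
crop_mask = numpy.ones((128, 128), dtype = numpy.float32)
restored_frame = paste_back(frame, crop_frame, crop_mask, affine_matrix)
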
facefusion/face_masker.py ADDED
@@ -0,0 +1,129 @@
from typing import Any, Dict, List
from cv2.typing import Size
from functools import lru_cache
import threading
import cv2
import numpy
import onnxruntime

import facefusion.globals
from facefusion.typing import Frame, Mask, Padding, FaceMaskRegion, ModelSet
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.filesystem import resolve_relative_path
from facefusion.download import conditional_download

FACE_OCCLUDER = None
FACE_PARSER = None
THREAD_LOCK : threading.Lock = threading.Lock()
MODELS : ModelSet =\
{
	'face_occluder':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_occluder.onnx',
		'path': resolve_relative_path('../.assets/models/face_occluder.onnx')
	},
	'face_parser':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_parser.onnx',
		'path': resolve_relative_path('../.assets/models/face_parser.onnx')
	}
}
# label indices within the face_parser segmentation output
FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\
{
	'skin': 1,
	'left-eyebrow': 2,
	'right-eyebrow': 3,
	'left-eye': 4,
	'right-eye': 5,
	'eye-glasses': 6,
	'nose': 10,
	'mouth': 11,
	'upper-lip': 12,
	'lower-lip': 13
}


def get_face_occluder() -> Any:
	global FACE_OCCLUDER

	with THREAD_LOCK:
		if FACE_OCCLUDER is None:
			model_path = MODELS.get('face_occluder').get('path')
			FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
	return FACE_OCCLUDER


def get_face_parser() -> Any:
	global FACE_PARSER

	with THREAD_LOCK:
		if FACE_PARSER is None:
			model_path = MODELS.get('face_parser').get('path')
			FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
	return FACE_PARSER


def clear_face_occluder() -> None:
	global FACE_OCCLUDER

	FACE_OCCLUDER = None


def clear_face_parser() -> None:
	global FACE_PARSER

	FACE_PARSER = None


def pre_check() -> bool:
	if not facefusion.globals.skip_download:
		download_directory_path = resolve_relative_path('../.assets/models')
		model_urls =\
		[
			MODELS.get('face_occluder').get('url'),
			MODELS.get('face_parser').get('url')
		]
		conditional_download(download_directory_path, model_urls)
	return True


@lru_cache(maxsize = None)
def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Mask:
	blur_amount = int(crop_size[0] * 0.5 * face_mask_blur)
	blur_area = max(blur_amount // 2, 1)
	box_mask = numpy.ones(crop_size, numpy.float32)
	box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0
	box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0
	box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0
	box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0
	if blur_amount > 0:
		box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25)
	return box_mask


def create_occlusion_mask(crop_frame : Frame) -> Mask:
	face_occluder = get_face_occluder()
	prepare_frame = cv2.resize(crop_frame, face_occluder.get_inputs()[0].shape[1:3][::-1])
	prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32) / 255
	prepare_frame = prepare_frame.transpose(0, 1, 2, 3)
	occlusion_mask = face_occluder.run(None,
	{
		face_occluder.get_inputs()[0].name: prepare_frame
	})[0][0]
	occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32)
	occlusion_mask = cv2.resize(occlusion_mask, crop_frame.shape[:2][::-1])
	return occlusion_mask


def create_region_mask(crop_frame : Frame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
	face_parser = get_face_parser()
	prepare_frame = cv2.flip(cv2.resize(crop_frame, (512, 512)), 1)
	prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1
	prepare_frame = prepare_frame.transpose(0, 3, 1, 2)
	region_mask = face_parser.run(None,
	{
		face_parser.get_inputs()[0].name: prepare_frame
	})[0][0]
	region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ])
	region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_frame.shape[:2][::-1])
	return region_mask
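
# My own illustration of how the frame processors consume these masks: each
# requested mask is produced at crop resolution and numpy.minimum.reduce keeps
# a pixel only where every mask agrees, so an occlusion mask can veto the box.
import numpy
from facefusion.face_masker import create_static_box_mask

box_mask = create_static_box_mask((512, 512), 0.3, (0, 0, 0, 0))
crop_mask_list = [ box_mask ]  # create_occlusion_mask / create_region_mask results would be appended here
crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
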
facefusion/face_store.py ADDED
@@ -0,0 +1,48 @@
from typing import Optional, List
import hashlib
import numpy

from facefusion.typing import Frame, Face, FaceStore, FaceSet

FACE_STORE : FaceStore =\
{
	'static_faces': {},
	'reference_faces': {}
}


def get_static_faces(frame : Frame) -> Optional[List[Face]]:
	frame_hash = create_frame_hash(frame)
	if frame_hash in FACE_STORE['static_faces']:
		return FACE_STORE['static_faces'][frame_hash]
	return None


def set_static_faces(frame : Frame, faces : List[Face]) -> None:
	frame_hash = create_frame_hash(frame)
	if frame_hash:
		FACE_STORE['static_faces'][frame_hash] = faces


def clear_static_faces() -> None:
	FACE_STORE['static_faces'] = {}


def create_frame_hash(frame : Frame) -> Optional[str]:
	return hashlib.sha1(frame.tobytes()).hexdigest() if numpy.any(frame) else None


def get_reference_faces() -> Optional[FaceSet]:
	if FACE_STORE['reference_faces']:
		return FACE_STORE['reference_faces']
	return None


def append_reference_face(name : str, face : Face) -> None:
	if name not in FACE_STORE['reference_faces']:
		FACE_STORE['reference_faces'][name] = []
	FACE_STORE['reference_faces'][name].append(face)


def clear_reference_faces() -> None:
	FACE_STORE['reference_faces'] = {}
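
# Quick behavioural check (illustrative): static faces are keyed by a SHA-1
# over the raw frame bytes, and a frame without content hashes to None so it
# is never cached.
import numpy
from facefusion.face_store import create_frame_hash

frame = numpy.zeros((8, 8, 3), dtype = numpy.uint8)
print(create_frame_hash(frame))  # None, an all-zero frame counts as empty
frame[0, 0] = 255
print(create_frame_hash(frame))  # stable 40 character hex digest
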
facefusion/ffmpeg.py ADDED
@@ -0,0 +1,98 @@
from typing import List, Optional
import subprocess

import facefusion.globals
from facefusion import logger
from facefusion.typing import OutputVideoPreset, Fps
from facefusion.filesystem import get_temp_frames_pattern, get_temp_output_video_path


def run_ffmpeg(args : List[str]) -> bool:
	commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
	commands.extend(args)
	try:
		subprocess.run(commands, stderr = subprocess.PIPE, check = True)
		return True
	except subprocess.CalledProcessError as exception:
		logger.debug(exception.stderr.decode().strip(), __name__.upper())
		return False


def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
	commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
	commands.extend(args)
	return subprocess.Popen(commands, stdin = subprocess.PIPE)


def extract_frames(target_path : str, video_resolution : str, video_fps : Fps) -> bool:
	temp_frame_compression = round(31 - (facefusion.globals.temp_frame_quality * 0.31))
	trim_frame_start = facefusion.globals.trim_frame_start
	trim_frame_end = facefusion.globals.trim_frame_end
	temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
	commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_compression), '-pix_fmt', 'rgb24' ]
	if trim_frame_start is not None and trim_frame_end is not None:
		commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',scale=' + str(video_resolution) + ',fps=' + str(video_fps) ])
	elif trim_frame_start is not None:
		commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',scale=' + str(video_resolution) + ',fps=' + str(video_fps) ])
	elif trim_frame_end is not None:
		commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',scale=' + str(video_resolution) + ',fps=' + str(video_fps) ])
	else:
		commands.extend([ '-vf', 'scale=' + str(video_resolution) + ',fps=' + str(video_fps) ])
	commands.extend([ '-vsync', '0', temp_frames_pattern ])
	return run_ffmpeg(commands)


def compress_image(output_path : str) -> bool:
	output_image_compression = round(31 - (facefusion.globals.output_image_quality * 0.31))
	commands = [ '-hwaccel', 'auto', '-i', output_path, '-q:v', str(output_image_compression), '-y', output_path ]
	return run_ffmpeg(commands)


def merge_video(target_path : str, video_fps : Fps) -> bool:
	temp_output_video_path = get_temp_output_video_path(target_path)
	temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
	commands = [ '-hwaccel', 'auto', '-r', str(video_fps), '-i', temp_frames_pattern, '-c:v', facefusion.globals.output_video_encoder ]
	if facefusion.globals.output_video_encoder in [ 'libx264', 'libx265' ]:
		output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51))
		commands.extend([ '-crf', str(output_video_compression), '-preset', facefusion.globals.output_video_preset ])
	if facefusion.globals.output_video_encoder in [ 'libvpx-vp9' ]:
		output_video_compression = round(63 - (facefusion.globals.output_video_quality * 0.63))
		commands.extend([ '-crf', str(output_video_compression) ])
	if facefusion.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
		output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51))
		commands.extend([ '-cq', str(output_video_compression), '-preset', map_nvenc_preset(facefusion.globals.output_video_preset) ])
	commands.extend([ '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_output_video_path ])
	return run_ffmpeg(commands)


def restore_audio(target_path : str, output_path : str, video_fps : Fps) -> bool:
	trim_frame_start = facefusion.globals.trim_frame_start
	trim_frame_end = facefusion.globals.trim_frame_end
	temp_output_video_path = get_temp_output_video_path(target_path)
	commands = [ '-hwaccel', 'auto', '-i', temp_output_video_path ]
	if trim_frame_start is not None:
		start_time = trim_frame_start / video_fps
		commands.extend([ '-ss', str(start_time) ])
	if trim_frame_end is not None:
		end_time = trim_frame_end / video_fps
		commands.extend([ '-to', str(end_time) ])
	commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ])
	return run_ffmpeg(commands)


def map_nvenc_preset(output_video_preset : OutputVideoPreset) -> Optional[str]:
	if output_video_preset in [ 'ultrafast', 'superfast', 'veryfast' ]:
		return 'p1'
	if output_video_preset == 'faster':
		return 'p2'
	if output_video_preset == 'fast':
		return 'p3'
	if output_video_preset == 'medium':
		return 'p4'
	if output_video_preset == 'slow':
		return 'p5'
	if output_video_preset == 'slower':
		return 'p6'
	if output_video_preset == 'veryslow':
		return 'p7'
	return None
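
# Sketch of the intended call order, assembled from the helpers above (paths
# and values are hypothetical): extract frames into the temp directory, let
# the frame processors rewrite them in place, merge them into temp.mp4 and
# finally mux the original audio back in.
#
# extract_frames('/videos/target.mp4', '1280x720', 25.0)
# ... frame processors rewrite the extracted frames here ...
# merge_video('/videos/target.mp4', 25.0)
# restore_audio('/videos/target.mp4', '/videos/output.mp4', 25.0)
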
facefusion/filesystem.py ADDED
@@ -0,0 +1,91 @@
from typing import List, Optional
import glob
import os
import shutil
import tempfile
import filetype
from pathlib import Path

import facefusion.globals

TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'facefusion')
TEMP_OUTPUT_VIDEO_NAME = 'temp.mp4'


def get_temp_frame_paths(target_path : str) -> List[str]:
	temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
	return sorted(glob.glob(temp_frames_pattern))


def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
	temp_directory_path = get_temp_directory_path(target_path)
	return os.path.join(temp_directory_path, temp_frame_prefix + '.' + facefusion.globals.temp_frame_format)


def get_temp_directory_path(target_path : str) -> str:
	target_name, _ = os.path.splitext(os.path.basename(target_path))
	return os.path.join(TEMP_DIRECTORY_PATH, target_name)


def get_temp_output_video_path(target_path : str) -> str:
	temp_directory_path = get_temp_directory_path(target_path)
	return os.path.join(temp_directory_path, TEMP_OUTPUT_VIDEO_NAME)


def create_temp(target_path : str) -> None:
	temp_directory_path = get_temp_directory_path(target_path)
	Path(temp_directory_path).mkdir(parents = True, exist_ok = True)


def move_temp(target_path : str, output_path : str) -> None:
	temp_output_video_path = get_temp_output_video_path(target_path)
	if is_file(temp_output_video_path):
		if is_file(output_path):
			os.remove(output_path)
		shutil.move(temp_output_video_path, output_path)


def clear_temp(target_path : str) -> None:
	temp_directory_path = get_temp_directory_path(target_path)
	parent_directory_path = os.path.dirname(temp_directory_path)
	if not facefusion.globals.keep_temp and is_directory(temp_directory_path):
		shutil.rmtree(temp_directory_path)
	if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
		os.rmdir(parent_directory_path)


def is_file(file_path : str) -> bool:
	return bool(file_path and os.path.isfile(file_path))


def is_directory(directory_path : str) -> bool:
	return bool(directory_path and os.path.isdir(directory_path))


def is_image(image_path : str) -> bool:
	if is_file(image_path):
		return filetype.helpers.is_image(image_path)
	return False


def are_images(image_paths : List[str]) -> bool:
	if image_paths:
		return all(is_image(image_path) for image_path in image_paths)
	return False


def is_video(video_path : str) -> bool:
	if is_file(video_path):
		return filetype.helpers.is_video(video_path)
	return False


def resolve_relative_path(path : str) -> str:
	return os.path.abspath(os.path.join(os.path.dirname(__file__), path))


def list_directory(directory_path : str) -> Optional[List[str]]:
	if is_directory(directory_path):
		files = os.listdir(directory_path)
		return [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ]
	return None
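
# Illustration of the temp layout, assuming the package is importable and
# using a hypothetical target path: every target gets its own folder below
# <tmp>/facefusion, holding the numbered frames and the merged temp.mp4.
import facefusion.globals
from facefusion.filesystem import get_temp_frames_pattern, get_temp_output_video_path

facefusion.globals.temp_frame_format = 'jpg'
print(get_temp_frames_pattern('/videos/clip.mp4', '%04d'))  # .../facefusion/clip/%04d.jpg
print(get_temp_output_video_path('/videos/clip.mp4'))  # .../facefusion/clip/temp.mp4
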
facefusion/globals.py ADDED
@@ -0,0 +1,55 @@
from typing import List, Optional

from facefusion.typing import LogLevel, VideoMemoryStrategy, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, OutputVideoEncoder, OutputVideoPreset, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding

# general
source_paths : Optional[List[str]] = None
target_path : Optional[str] = None
output_path : Optional[str] = None
# misc
skip_download : Optional[bool] = None
headless : Optional[bool] = None
log_level : Optional[LogLevel] = None
# execution
execution_providers : List[str] = []
execution_thread_count : Optional[int] = None
execution_queue_count : Optional[int] = None
# memory
video_memory_strategy : Optional[VideoMemoryStrategy] = None
system_memory_limit : Optional[int] = None
# face analyser
face_analyser_order : Optional[FaceAnalyserOrder] = None
face_analyser_age : Optional[FaceAnalyserAge] = None
face_analyser_gender : Optional[FaceAnalyserGender] = None
face_detector_model : Optional[FaceDetectorModel] = None
face_detector_size : Optional[str] = None
face_detector_score : Optional[float] = None
face_recognizer_model : Optional[FaceRecognizerModel] = None
# face selector
face_selector_mode : Optional[FaceSelectorMode] = None
reference_face_position : Optional[int] = None
reference_face_distance : Optional[float] = None
reference_frame_number : Optional[int] = None
# face mask
face_mask_types : Optional[List[FaceMaskType]] = None
face_mask_blur : Optional[float] = None
face_mask_padding : Optional[Padding] = None
face_mask_regions : Optional[List[FaceMaskRegion]] = None
# frame extraction
trim_frame_start : Optional[int] = None
trim_frame_end : Optional[int] = None
temp_frame_format : Optional[TempFrameFormat] = None
temp_frame_quality : Optional[int] = None
keep_temp : Optional[bool] = None
# output creation
output_image_quality : Optional[int] = None
output_video_encoder : Optional[OutputVideoEncoder] = None
output_video_preset : Optional[OutputVideoPreset] = None
output_video_quality : Optional[int] = None
output_video_resolution : Optional[str] = None
output_video_fps : Optional[float] = None
skip_audio : Optional[bool] = None
# frame processors
frame_processors : List[str] = []
# uis
ui_layouts : List[str] = []
facefusion/installer.py ADDED
@@ -0,0 +1,92 @@
from typing import Dict, Tuple
import sys
import os
import platform
import tempfile
import subprocess
from argparse import ArgumentParser, HelpFormatter

subprocess.call([ 'pip', 'install', 'inquirer', '-q' ])

import inquirer

from facefusion import metadata, wording

TORCH : Dict[str, str] =\
{
	'default': 'default',
	'cpu': 'cpu'
}
ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
{
	'default': ('onnxruntime', '1.16.3')
}
if platform.system().lower() == 'linux' or platform.system().lower() == 'windows':
	TORCH['cuda'] = 'cu118'
	TORCH['cuda-nightly'] = 'cu121'
	ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.16.3')
	ONNXRUNTIMES['cuda-nightly'] = ('onnxruntime-gpu', '1.17.0')
	ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.16.0')
if platform.system().lower() == 'linux':
	TORCH['rocm'] = 'rocm5.6'
	ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.16.3')
if platform.system().lower() == 'darwin':
	ONNXRUNTIMES['coreml-legacy'] = ('onnxruntime-coreml', '1.13.1')
	ONNXRUNTIMES['coreml-silicon'] = ('onnxruntime-silicon', '1.16.0')
if platform.system().lower() == 'windows':
	ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.16.3')


def cli() -> None:
	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
	program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), choices = TORCH.keys())
	program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
	program.add_argument('--skip-venv', help = wording.get('skip_venv_help'), action = 'store_true')
	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
	run(program)


def run(program : ArgumentParser) -> None:
	args = program.parse_args()
	python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)

	if not args.skip_venv:
		os.environ['PIP_REQUIRE_VIRTUALENV'] = '1'
	if args.torch and args.onnxruntime:
		answers =\
		{
			'torch': args.torch,
			'onnxruntime': args.onnxruntime
		}
	else:
		answers = inquirer.prompt(
		[
			inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
			inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
		])
	if answers:
		torch = answers['torch']
		torch_wheel = TORCH[torch]
		onnxruntime = answers['onnxruntime']
		onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]

		subprocess.call([ 'pip', 'uninstall', 'torch', '-y', '-q' ])
		if torch_wheel == 'default':
			subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--force-reinstall' ])
		else:
			subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel, '--force-reinstall' ])
		if onnxruntime == 'rocm':
			if python_id in [ 'cp39', 'cp310', 'cp311' ]:
				wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+rocm56-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
				wheel_path = os.path.join(tempfile.gettempdir(), wheel_name)
				wheel_url = 'https://download.onnxruntime.ai/' + wheel_name
				subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ])
				subprocess.call([ 'pip', 'uninstall', wheel_path, '-y', '-q' ])
				subprocess.call([ 'pip', 'install', wheel_path, '--force-reinstall' ])
				os.remove(wheel_path)
		else:
			subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
			if onnxruntime == 'cuda-nightly':
				subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple', '--force-reinstall' ])
			else:
				subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ])
facefusion/logger.py ADDED
@@ -0,0 +1,47 @@
from typing import Dict
from logging import basicConfig, getLogger, Logger, DEBUG, INFO, WARNING, ERROR

from facefusion.typing import LogLevel


def init(log_level : LogLevel) -> None:
	basicConfig(format = None)
	get_package_logger().setLevel(get_log_levels()[log_level])


def get_package_logger() -> Logger:
	return getLogger('facefusion')


def debug(message : str, scope : str) -> None:
	get_package_logger().debug('[' + scope + '] ' + message)


def info(message : str, scope : str) -> None:
	get_package_logger().info('[' + scope + '] ' + message)


def warn(message : str, scope : str) -> None:
	get_package_logger().warning('[' + scope + '] ' + message)


def error(message : str, scope : str) -> None:
	get_package_logger().error('[' + scope + '] ' + message)


def enable() -> None:
	get_package_logger().disabled = False


def disable() -> None:
	get_package_logger().disabled = True


def get_log_levels() -> Dict[LogLevel, int]:
	return\
	{
		'error': ERROR,
		'warn': WARNING,
		'info': INFO,
		'debug': DEBUG
	}
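
# Minimal usage sketch: initialise once with the configured level, then log
# with an upper-cased scope, matching the calling convention used across the
# other modules.
from facefusion import logger

logger.init('debug')
logger.debug('model warmed up', __name__.upper())
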
facefusion/memory.py ADDED
@@ -0,0 +1,21 @@
import platform

if platform.system().lower() == 'windows':
	import ctypes
else:
	import resource


def limit_system_memory(system_memory_limit : int = 1) -> bool:
	if platform.system().lower() == 'darwin':
		system_memory_limit = system_memory_limit * (1024 ** 6)
	else:
		system_memory_limit = system_memory_limit * (1024 ** 3)
	try:
		if platform.system().lower() == 'windows':
			ctypes.windll.kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(system_memory_limit), ctypes.c_size_t(system_memory_limit)) # type: ignore[attr-defined]
		else:
			resource.setrlimit(resource.RLIMIT_DATA, (system_memory_limit, system_memory_limit))
		return True
	except Exception:
		return False
facefusion/metadata.py ADDED
@@ -0,0 +1,13 @@
METADATA =\
{
	'name': 'FaceFusion',
	'description': 'Next generation face swapper and enhancer',
	'version': '2.2.0',
	'license': 'MIT',
	'author': 'Henry Ruhs',
	'url': 'https://facefusion.io'
}


def get(key : str) -> str:
	return METADATA[key]
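
# Usage sketch: other modules format their version banner from these fields.
from facefusion import metadata

print(metadata.get('name') + ' ' + metadata.get('version'))  # FaceFusion 2.2.0
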
facefusion/normalizer.py ADDED
@@ -0,0 +1,44 @@
from typing import List, Optional
import os

from facefusion.filesystem import is_file, is_directory
from facefusion.typing import Padding, Fps


def normalize_output_path(source_paths : List[str], target_path : str, output_path : str) -> Optional[str]:
	if is_file(target_path) and is_directory(output_path):
		target_name, target_extension = os.path.splitext(os.path.basename(target_path))
		if source_paths and is_file(source_paths[0]):
			source_name, _ = os.path.splitext(os.path.basename(source_paths[0]))
			return os.path.join(output_path, source_name + '-' + target_name + target_extension)
		return os.path.join(output_path, target_name + target_extension)
	if is_file(target_path) and output_path:
		_, target_extension = os.path.splitext(os.path.basename(target_path))
		output_name, output_extension = os.path.splitext(os.path.basename(output_path))
		output_directory_path = os.path.dirname(output_path)
		if is_directory(output_directory_path) and output_extension:
			return os.path.join(output_directory_path, output_name + target_extension)
		return None
	return output_path


def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:
	if padding and len(padding) == 1:
		return tuple([ padding[0], padding[0], padding[0], padding[0] ]) # type: ignore[return-value]
	if padding and len(padding) == 2:
		return tuple([ padding[0], padding[1], padding[0], padding[1] ]) # type: ignore[return-value]
	if padding and len(padding) == 3:
		return tuple([ padding[0], padding[1], padding[2], padding[1] ]) # type: ignore[return-value]
	if padding and len(padding) == 4:
		return tuple(padding) # type: ignore[return-value]
	return None


def normalize_fps(fps : Optional[float]) -> Optional[Fps]:
	if fps is not None:
		if fps < 1.0:
			return 1.0
		if fps > 60.0:
			return 60.0
		return fps
	return None
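
# Worked examples (my own) of the CSS style shorthand expansion performed by
# normalize_padding; the tuple order is top, right, bottom, left.
from facefusion.normalizer import normalize_padding

print(normalize_padding([ 10 ]))  # (10, 10, 10, 10)
print(normalize_padding([ 10, 20 ]))  # (10, 20, 10, 20)
print(normalize_padding([ 10, 20, 30 ]))  # (10, 20, 30, 20)
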
facefusion/processors/__init__.py ADDED
File without changes
facefusion/processors/frame/__init__.py ADDED
File without changes
facefusion/processors/frame/choices.py ADDED
@@ -0,0 +1,13 @@
from typing import List

from facefusion.common_helper import create_int_range
from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem

face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]

face_enhancer_blend_range : List[int] = create_int_range(0, 100, 1)
frame_enhancer_blend_range : List[int] = create_int_range(0, 100, 1)
facefusion/processors/frame/core.py ADDED
@@ -0,0 +1,101 @@
import sys
import importlib
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from types import ModuleType
from typing import Any, List
from tqdm import tqdm

import facefusion.globals
from facefusion.typing import Process_Frames
from facefusion.execution_helper import encode_execution_providers
from facefusion import logger, wording

FRAME_PROCESSORS_MODULES : List[ModuleType] = []
FRAME_PROCESSORS_METHODS =\
[
	'get_frame_processor',
	'clear_frame_processor',
	'get_options',
	'set_options',
	'register_args',
	'apply_args',
	'pre_check',
	'post_check',
	'pre_process',
	'post_process',
	'get_reference_frame',
	'process_frame',
	'process_frames',
	'process_image',
	'process_video'
]


def load_frame_processor_module(frame_processor : str) -> Any:
	try:
		frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
		for method_name in FRAME_PROCESSORS_METHODS:
			if not hasattr(frame_processor_module, method_name):
				raise NotImplementedError
	except ModuleNotFoundError as exception:
		logger.error(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor), __name__.upper())
		logger.debug(exception.msg, __name__.upper())
		sys.exit(1)
	except NotImplementedError:
		logger.error(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor), __name__.upper())
		sys.exit(1)
	return frame_processor_module


def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
	global FRAME_PROCESSORS_MODULES

	if not FRAME_PROCESSORS_MODULES:
		for frame_processor in frame_processors:
			frame_processor_module = load_frame_processor_module(frame_processor)
			FRAME_PROCESSORS_MODULES.append(frame_processor_module)
	return FRAME_PROCESSORS_MODULES


def clear_frame_processors_modules() -> None:
	global FRAME_PROCESSORS_MODULES

	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
		frame_processor_module.clear_frame_processor()
	FRAME_PROCESSORS_MODULES = []


def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
	with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
		progress.set_postfix(
		{
			'execution_providers': encode_execution_providers(facefusion.globals.execution_providers),
			'execution_thread_count': facefusion.globals.execution_thread_count,
			'execution_queue_count': facefusion.globals.execution_queue_count
		})
		with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
			futures = []
			queue_frame_paths : Queue[str] = create_queue(temp_frame_paths)
			queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
			while not queue_frame_paths.empty():
				submit_frame_paths = pick_queue(queue_frame_paths, queue_per_future)
				future = executor.submit(process_frames, source_paths, submit_frame_paths, progress.update)
				futures.append(future)
			for future_done in as_completed(futures):
				future_done.result()


def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
	queue : Queue[str] = Queue()
	for frame_path in temp_frame_paths:
		queue.put(frame_path)
	return queue


def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
	queues = []
	for _ in range(queue_per_future):
		if not queue.empty():
			queues.append(queue.get())
	return queues
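
# Worked example with illustrative numbers: multi_process_frames sizes each
# submitted future so that it drains up to queue_per_future frame paths from
# the shared queue via pick_queue.
temp_frame_total = 1000
execution_thread_count = 4
execution_queue_count = 2
queue_per_future = max(temp_frame_total // execution_thread_count * execution_queue_count, 1)
print(queue_per_future)  # 500 frame paths handed to each submitted future
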
facefusion/processors/frame/globals.py ADDED
@@ -0,0 +1,10 @@
from typing import List, Optional

from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem

face_swapper_model : Optional[FaceSwapperModel] = None
face_enhancer_model : Optional[FaceEnhancerModel] = None
face_enhancer_blend : Optional[int] = None
frame_enhancer_model : Optional[FrameEnhancerModel] = None
frame_enhancer_blend : Optional[int] = None
face_debugger_items : Optional[List[FaceDebuggerItem]] = None
facefusion/processors/frame/modules/__init__.py ADDED
File without changes
facefusion/processors/frame/modules/face_debugger.py ADDED
@@ -0,0 +1,149 @@
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.face_helper import warp_face_by_kps
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

NAME = __name__.upper()


def get_frame_processor() -> None:
	pass


def clear_frame_processor() -> None:
	pass


def get_options(key : Literal['model']) -> None:
	pass


def set_options(key : Literal['model'], value : Any) -> None:
	pass


def register_args(program : ArgumentParser) -> None:
	program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = config.get_str_list('frame_processors.face_debugger_items', 'kps face-mask'), choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')


def apply_args(program : ArgumentParser) -> None:
	args = program.parse_args()
	frame_processors_globals.face_debugger_items = args.face_debugger_items


def pre_check() -> bool:
	return True


def post_check() -> bool:
	return True


def pre_process(mode : ProcessMode) -> bool:
	return True


def post_process() -> None:
	if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
		clear_frame_processor()
	read_static_image.cache_clear()
	if facefusion.globals.video_memory_strategy == 'strict':
		clear_face_analyser()
		clear_content_analyser()
		clear_face_occluder()
		clear_face_parser()


def debug_face(source_face : Face, target_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
	primary_color = (0, 0, 255)
	secondary_color = (0, 255, 0)
	bounding_box = target_face.bbox.astype(numpy.int32)
	temp_frame = temp_frame.copy()
	if 'bbox' in frame_processors_globals.face_debugger_items:
		cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
	if 'face-mask' in frame_processors_globals.face_debugger_items:
		crop_frame, affine_matrix = warp_face_by_kps(temp_frame, target_face.kps, 'arcface_128_v2', (512, 512))
		inverse_matrix = cv2.invertAffineTransform(affine_matrix)
		temp_frame_size = temp_frame.shape[:2][::-1]
		crop_mask_list = []
		if 'box' in facefusion.globals.face_mask_types:
			crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], 0, facefusion.globals.face_mask_padding))
		if 'occlusion' in facefusion.globals.face_mask_types:
			crop_mask_list.append(create_occlusion_mask(crop_frame))
		if 'region' in facefusion.globals.face_mask_types:
			crop_mask_list.append(create_region_mask(crop_frame, facefusion.globals.face_mask_regions))
		crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
		crop_mask = (crop_mask * 255).astype(numpy.uint8)
		inverse_mask_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size)
		inverse_mask_frame = cv2.threshold(inverse_mask_frame, 100, 255, cv2.THRESH_BINARY)[1]
		inverse_mask_frame[inverse_mask_frame > 0] = 255
		inverse_mask_contours = cv2.findContours(inverse_mask_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
		cv2.drawContours(temp_frame, inverse_mask_contours, -1, primary_color, 2)
	if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
		if 'kps' in frame_processors_globals.face_debugger_items:
			kps = target_face.kps.astype(numpy.int32)
			for index in range(kps.shape[0]):
				cv2.circle(temp_frame, (kps[index][0], kps[index][1]), 3, primary_color, -1)
		if 'score' in frame_processors_globals.face_debugger_items:
			face_score_text = str(round(target_face.score, 2))
			face_score_position = (bounding_box[0] + 10, bounding_box[1] + 20)
			cv2.putText(temp_frame, face_score_text, face_score_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, secondary_color, 2)
	return temp_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
	pass


def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
	if 'reference' in facefusion.globals.face_selector_mode:
		similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
		if similar_faces:
			for similar_face in similar_faces:
				temp_frame = debug_face(source_face, similar_face, reference_faces, temp_frame)
	if 'one' in facefusion.globals.face_selector_mode:
		target_face = get_one_face(temp_frame)
		if target_face:
			temp_frame = debug_face(source_face, target_face, None, temp_frame)
	if 'many' in facefusion.globals.face_selector_mode:
		many_faces = get_many_faces(temp_frame)
		if many_faces:
			for target_face in many_faces:
				temp_frame = debug_face(source_face, target_face, None, temp_frame)
	return temp_frame


def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
	source_frames = read_static_images(source_paths)
	source_face = get_average_face(source_frames)
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	for temp_frame_path in temp_frame_paths:
		temp_frame = read_image(temp_frame_path)
		result_frame = process_frame(source_face, reference_faces, temp_frame)
		write_image(temp_frame_path, result_frame)
		update_progress()


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
	source_frames = read_static_images(source_paths)
	source_face = get_average_face(source_frames)
	target_frame = read_static_image(target_path)
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	result_frame = process_frame(source_face, reference_faces, target_frame)
	write_image(output_path, result_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
facefusion/processors/frame/modules/face_enhancer.py ADDED
@@ -0,0 +1,263 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import cv2
import threading
import numpy
import onnxruntime

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_helper import warp_face_by_kps, paste_back
from facefusion.content_analyser import clear_content_analyser
from facefusion.face_store import get_reference_faces
from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
from facefusion.common_helper import create_metavar
from facefusion.filesystem import is_file, is_image, is_video, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder

FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = __name__.upper()
MODELS : ModelSet =\
{
	'codeformer':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
		'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
		'template': 'ffhq_512',
		'size': (512, 512)
	},
	'gfpgan_1.2':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
		'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
		'template': 'ffhq_512',
		'size': (512, 512)
	},
	'gfpgan_1.3':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
		'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
		'template': 'ffhq_512',
		'size': (512, 512)
	},
	'gfpgan_1.4':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
		'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
		'template': 'ffhq_512',
		'size': (512, 512)
	},
	'gpen_bfr_256':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
		'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
		'template': 'arcface_128_v2',
		'size': (256, 256)
	},
	'gpen_bfr_512':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
		'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
		'template': 'ffhq_512',
		'size': (512, 512)
	},
	'restoreformer':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
		'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
		'template': 'ffhq_512',
		'size': (512, 512)
	}
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
	global FRAME_PROCESSOR

	with THREAD_LOCK:
		if FRAME_PROCESSOR is None:
			model_path = get_options('model').get('path')
			FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
	return FRAME_PROCESSOR


def clear_frame_processor() -> None:
	global FRAME_PROCESSOR

	FRAME_PROCESSOR = None


def get_options(key : Literal['model']) -> Any:
	global OPTIONS

	if OPTIONS is None:
		OPTIONS =\
		{
			'model': MODELS[frame_processors_globals.face_enhancer_model]
		}
	return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
	global OPTIONS

	OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
	program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), default = config.get_str_value('frame_processors.face_enhancer_model', 'gfpgan_1.4'), choices = frame_processors_choices.face_enhancer_models)
	program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = config.get_int_value('frame_processors.face_enhancer_blend', '80'), choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))


def apply_args(program : ArgumentParser) -> None:
	args = program.parse_args()
	frame_processors_globals.face_enhancer_model = args.face_enhancer_model
	frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend


def pre_check() -> bool:
	if not facefusion.globals.skip_download:
		download_directory_path = resolve_relative_path('../.assets/models')
		model_url = get_options('model').get('url')
		conditional_download(download_directory_path, [ model_url ])
	return True


def post_check() -> bool:
	model_url = get_options('model').get('url')
	model_path = get_options('model').get('path')
	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
		return False
	elif not is_file(model_path):
		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
		return False
	return True


def pre_process(mode : ProcessMode) -> bool:
	if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
		return False
	if mode == 'output' and not facefusion.globals.output_path:
		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
		return False
	return True


def post_process() -> None:
	if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
		clear_frame_processor()
	read_static_image.cache_clear()
	if facefusion.globals.video_memory_strategy == 'strict':
		clear_face_analyser()
		clear_content_analyser()
		clear_face_occluder()


def enhance_face(target_face : Face, temp_frame : Frame) -> Frame:
	model_template = get_options('model').get('template')
	model_size = get_options('model').get('size')
	crop_frame, affine_matrix = warp_face_by_kps(temp_frame, target_face.kps, model_template, model_size)
	crop_mask_list =\
	[
		create_static_box_mask(crop_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, (0, 0, 0, 0))
	]
	if 'occlusion' in facefusion.globals.face_mask_types:
		crop_mask_list.append(create_occlusion_mask(crop_frame))
	crop_frame = prepare_crop_frame(crop_frame)
	crop_frame = apply_enhance(crop_frame)
	crop_frame = normalize_crop_frame(crop_frame)
	crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
	paste_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
	temp_frame = blend_frame(temp_frame, paste_frame)
	return temp_frame


def apply_enhance(crop_frame : Frame) -> Frame:
	frame_processor = get_frame_processor()
	frame_processor_inputs = {}

	for frame_processor_input in frame_processor.get_inputs():
		if frame_processor_input.name == 'input':
			frame_processor_inputs[frame_processor_input.name] = crop_frame
		if frame_processor_input.name == 'weight':
			weight = numpy.array([ 1 ], dtype = numpy.double)
			frame_processor_inputs[frame_processor_input.name] = weight
	with THREAD_SEMAPHORE:
		crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
	return crop_frame


def prepare_crop_frame(crop_frame : Frame) -> Frame:
	crop_frame = crop_frame[:, :, ::-1] / 255.0
	crop_frame = (crop_frame - 0.5) / 0.5
	crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
	return crop_frame


def normalize_crop_frame(crop_frame : Frame) -> Frame:
	crop_frame = numpy.clip(crop_frame, -1, 1)
	crop_frame = (crop_frame + 1) / 2
	crop_frame = crop_frame.transpose(1, 2, 0)
	crop_frame = (crop_frame * 255.0).round()
	crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1]
	return crop_frame


def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
	face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
	temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
	return temp_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
	return enhance_face(target_face, temp_frame)


def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
	if 'reference' in facefusion.globals.face_selector_mode:
		similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
		if similar_faces:
			for similar_face in similar_faces:
				temp_frame = enhance_face(similar_face, temp_frame)
	if 'one' in facefusion.globals.face_selector_mode:
		target_face = get_one_face(temp_frame)
		if target_face:
			temp_frame = enhance_face(target_face, temp_frame)
	if 'many' in facefusion.globals.face_selector_mode:
		many_faces = get_many_faces(temp_frame)
		if many_faces:
			for target_face in many_faces:
				temp_frame = enhance_face(target_face, temp_frame)
	return temp_frame


def process_frames(source_path : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	for temp_frame_path in temp_frame_paths:
		temp_frame = read_image(temp_frame_path)
		result_frame = process_frame(None, reference_faces, temp_frame)
		write_image(temp_frame_path, result_frame)
		update_progress()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	target_frame = read_static_image(target_path)
	result_frame = process_frame(None, reference_faces, target_frame)
	write_image(output_path, result_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
facefusion/processors/frame/modules/face_swapper.py ADDED
@@ -0,0 +1,321 @@
+ from typing import Any, List, Literal, Optional
+ from argparse import ArgumentParser
+ import platform
+ import threading
+ import numpy
+ import onnx
+ import onnxruntime
+ from onnx import numpy_helper
+
+ import facefusion.globals
+ import facefusion.processors.frame.core as frame_processors
+ from facefusion import config, logger, wording
+ from facefusion.execution_helper import apply_execution_provider_options
+ from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
+ from facefusion.face_helper import warp_face_by_kps, paste_back
+ from facefusion.face_store import get_reference_faces
+ from facefusion.content_analyser import clear_content_analyser
+ from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, Embedding
+ from facefusion.filesystem import is_file, is_image, are_images, is_video, resolve_relative_path
+ from facefusion.download import conditional_download, is_download_done
+ from facefusion.vision import read_image, read_static_image, read_static_images, write_image
+ from facefusion.processors.frame import globals as frame_processors_globals
+ from facefusion.processors.frame import choices as frame_processors_choices
+ from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
+
+ FRAME_PROCESSOR = None
+ MODEL_MATRIX = None
+ THREAD_LOCK : threading.Lock = threading.Lock()
+ NAME = __name__.upper()
+ MODELS : ModelSet =\
+ {
+     'blendswap_256':
+     {
+         'type': 'blendswap',
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendswap_256.onnx',
+         'path': resolve_relative_path('../.assets/models/blendswap_256.onnx'),
+         'template': 'ffhq_512',
+         'size': (256, 256),
+         'mean': [ 0.0, 0.0, 0.0 ],
+         'standard_deviation': [ 1.0, 1.0, 1.0 ]
+     },
+     'inswapper_128':
+     {
+         'type': 'inswapper',
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
+         'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
+         'template': 'arcface_128_v2',
+         'size': (128, 128),
+         'mean': [ 0.0, 0.0, 0.0 ],
+         'standard_deviation': [ 1.0, 1.0, 1.0 ]
+     },
+     'inswapper_128_fp16':
+     {
+         'type': 'inswapper',
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
+         'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
+         'template': 'arcface_128_v2',
+         'size': (128, 128),
+         'mean': [ 0.0, 0.0, 0.0 ],
+         'standard_deviation': [ 1.0, 1.0, 1.0 ]
+     },
+     'simswap_256':
+     {
+         'type': 'simswap',
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
+         'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
+         'template': 'arcface_112_v1',
+         'size': (256, 256),
+         'mean': [ 0.485, 0.456, 0.406 ],
+         'standard_deviation': [ 0.229, 0.224, 0.225 ]
+     },
+     'simswap_512_unofficial':
+     {
+         'type': 'simswap',
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
+         'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
+         'template': 'arcface_112_v1',
+         'size': (512, 512),
+         'mean': [ 0.0, 0.0, 0.0 ],
+         'standard_deviation': [ 1.0, 1.0, 1.0 ]
+     }
+ }
+ OPTIONS : Optional[OptionsWithModel] = None
+
+
+ def get_frame_processor() -> Any:
+     global FRAME_PROCESSOR
+
+     with THREAD_LOCK:
+         if FRAME_PROCESSOR is None:
+             model_path = get_options('model').get('path')
+             FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
+     return FRAME_PROCESSOR
+
+
+ def clear_frame_processor() -> None:
+     global FRAME_PROCESSOR
+
+     FRAME_PROCESSOR = None
+
+
+ def get_model_matrix() -> Any:
+     global MODEL_MATRIX
+
+     with THREAD_LOCK:
+         if MODEL_MATRIX is None:
+             model_path = get_options('model').get('path')
+             model = onnx.load(model_path)
+             MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
+     return MODEL_MATRIX
+
+
+ def clear_model_matrix() -> None:
+     global MODEL_MATRIX
+
+     MODEL_MATRIX = None
+
+
+ def get_options(key : Literal['model']) -> Any:
+     global OPTIONS
+
+     if OPTIONS is None:
+         OPTIONS =\
+         {
+             'model': MODELS[frame_processors_globals.face_swapper_model]
+         }
+     return OPTIONS.get(key)
+
+
+ def set_options(key : Literal['model'], value : Any) -> None:
+     global OPTIONS
+
+     OPTIONS[key] = value
+
+
+ def register_args(program : ArgumentParser) -> None:
+     if platform.system().lower() == 'darwin':
+         face_swapper_model_fallback = 'inswapper_128'
+     else:
+         face_swapper_model_fallback = 'inswapper_128_fp16'
+     program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), default = config.get_str_value('frame_processors.face_swapper_model', face_swapper_model_fallback), choices = frame_processors_choices.face_swapper_models)
+
+
+ def apply_args(program : ArgumentParser) -> None:
+     args = program.parse_args()
+     frame_processors_globals.face_swapper_model = args.face_swapper_model
+     if args.face_swapper_model == 'blendswap_256':
+         facefusion.globals.face_recognizer_model = 'arcface_blendswap'
+     if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
+         facefusion.globals.face_recognizer_model = 'arcface_inswapper'
+     if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
+         facefusion.globals.face_recognizer_model = 'arcface_simswap'
+
+
+ def pre_check() -> bool:
+     if not facefusion.globals.skip_download:
+         download_directory_path = resolve_relative_path('../.assets/models')
+         model_url = get_options('model').get('url')
+         conditional_download(download_directory_path, [ model_url ])
+     return True
+
+
+ def post_check() -> bool:
+     model_url = get_options('model').get('url')
+     model_path = get_options('model').get('path')
+     if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+         logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+         return False
+     elif not is_file(model_path):
+         logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+         return False
+     return True
+
+
+ def pre_process(mode : ProcessMode) -> bool:
+     if not are_images(facefusion.globals.source_paths):
+         logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
+         return False
+     for source_frame in read_static_images(facefusion.globals.source_paths):
+         if not get_one_face(source_frame):
+             logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
+             return False
+     if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+         logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+         return False
+     if mode == 'output' and not facefusion.globals.output_path:
+         logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+         return False
+     return True
+
+
+ def post_process() -> None:
+     if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
+         clear_frame_processor()
+         clear_model_matrix()
+         read_static_image.cache_clear()
+     if facefusion.globals.video_memory_strategy == 'strict':
+         clear_face_analyser()
+         clear_content_analyser()
+         clear_face_occluder()
+         clear_face_parser()
+
+
+ def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+     model_template = get_options('model').get('template')
+     model_size = get_options('model').get('size')
+     crop_frame, affine_matrix = warp_face_by_kps(temp_frame, target_face.kps, model_template, model_size)
+     crop_mask_list = []
+
+     if 'box' in facefusion.globals.face_mask_types:
+         crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding))
+     if 'occlusion' in facefusion.globals.face_mask_types:
+         crop_mask_list.append(create_occlusion_mask(crop_frame))
+     crop_frame = prepare_crop_frame(crop_frame)
+     crop_frame = apply_swap(source_face, crop_frame)
+     crop_frame = normalize_crop_frame(crop_frame)
+     if 'region' in facefusion.globals.face_mask_types:
+         crop_mask_list.append(create_region_mask(crop_frame, facefusion.globals.face_mask_regions))
+     crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
+     temp_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
+     return temp_frame
+
+
+ def apply_swap(source_face : Face, crop_frame : Frame) -> Frame:
+     frame_processor = get_frame_processor()
+     model_type = get_options('model').get('type')
+     frame_processor_inputs = {}
+
+     for frame_processor_input in frame_processor.get_inputs():
+         if frame_processor_input.name == 'source':
+             if model_type == 'blendswap':
+                 frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
+             else:
+                 frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
+         if frame_processor_input.name == 'target':
+             frame_processor_inputs[frame_processor_input.name] = crop_frame
+     crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
+     return crop_frame
+
+
+ def prepare_source_frame(source_face : Face) -> Frame:
+     source_frame = read_static_image(facefusion.globals.source_paths[0])
+     source_frame, _ = warp_face_by_kps(source_frame, source_face.kps, 'arcface_112_v2', (112, 112))
+     source_frame = source_frame[:, :, ::-1] / 255.0
+     source_frame = source_frame.transpose(2, 0, 1)
+     source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
+     return source_frame
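+
+
+ # inswapper expects the raw ArcFace embedding multiplied by a projection matrix
+ # stored in the ONNX initializers (see get_model_matrix) and scaled by the inverse
+ # norm of the embedding; simswap takes the unit-normalised embedding as-is, while
+ # blendswap receives a warped source frame instead (prepare_source_frame above).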
+ def prepare_source_embedding(source_face : Face) -> Embedding:
+     model_type = get_options('model').get('type')
+     if model_type == 'inswapper':
+         model_matrix = get_model_matrix()
+         source_embedding = source_face.embedding.reshape((1, -1))
+         source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
+     else:
+         source_embedding = source_face.normed_embedding.reshape(1, -1)
+     return source_embedding
+
+
+ def prepare_crop_frame(crop_frame : Frame) -> Frame:
+     model_mean = get_options('model').get('mean')
+     model_standard_deviation = get_options('model').get('standard_deviation')
+     crop_frame = crop_frame[:, :, ::-1] / 255.0
+     crop_frame = (crop_frame - model_mean) / model_standard_deviation
+     crop_frame = crop_frame.transpose(2, 0, 1)
+     crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
+     return crop_frame
+
+
+ def normalize_crop_frame(crop_frame : Frame) -> Frame:
+     crop_frame = crop_frame.transpose(1, 2, 0)
+     crop_frame = (crop_frame * 255.0).round()
+     crop_frame = crop_frame[:, :, ::-1]
+     return crop_frame
+
+
+ def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+     return swap_face(source_face, target_face, temp_frame)
+
+
+ def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+     if 'reference' in facefusion.globals.face_selector_mode:
+         similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
+         if similar_faces:
+             for similar_face in similar_faces:
+                 temp_frame = swap_face(source_face, similar_face, temp_frame)
+     if 'one' in facefusion.globals.face_selector_mode:
+         target_face = get_one_face(temp_frame)
+         if target_face:
+             temp_frame = swap_face(source_face, target_face, temp_frame)
+     if 'many' in facefusion.globals.face_selector_mode:
+         many_faces = get_many_faces(temp_frame)
+         if many_faces:
+             for target_face in many_faces:
+                 temp_frame = swap_face(source_face, target_face, temp_frame)
+     return temp_frame
+
+
+ def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+     source_frames = read_static_images(source_paths)
+     source_face = get_average_face(source_frames)
+     reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+     for temp_frame_path in temp_frame_paths:
+         temp_frame = read_image(temp_frame_path)
+         result_frame = process_frame(source_face, reference_faces, temp_frame)
+         write_image(temp_frame_path, result_frame)
+         update_progress()
+
+
+ def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
+     source_frames = read_static_images(source_paths)
+     source_face = get_average_face(source_frames)
+     reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+     target_frame = read_static_image(target_path)
+     result_frame = process_frame(source_face, reference_faces, target_frame)
+     write_image(output_path, result_frame)
+
+
+ def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
+     frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
facefusion/processors/frame/modules/frame_enhancer.py ADDED
@@ -0,0 +1,178 @@
+ from typing import Any, List, Literal, Optional
+ from argparse import ArgumentParser
+ import threading
+ import cv2
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+ from realesrgan import RealESRGANer
+
+ import facefusion.globals
+ import facefusion.processors.frame.core as frame_processors
+ from facefusion import config, logger, wording
+ from facefusion.face_analyser import clear_face_analyser
+ from facefusion.content_analyser import clear_content_analyser
+ from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
+ from facefusion.common_helper import create_metavar
+ from facefusion.execution_helper import map_torch_backend
+ from facefusion.filesystem import is_file, resolve_relative_path
+ from facefusion.download import conditional_download, is_download_done
+ from facefusion.vision import read_image, read_static_image, write_image
+ from facefusion.processors.frame import globals as frame_processors_globals
+ from facefusion.processors.frame import choices as frame_processors_choices
+
+ FRAME_PROCESSOR = None
+ THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
+ THREAD_LOCK : threading.Lock = threading.Lock()
+ NAME = __name__.upper()
+ MODELS : ModelSet =\
+ {
+     'real_esrgan_x2plus':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x2plus.pth',
+         'path': resolve_relative_path('../.assets/models/real_esrgan_x2plus.pth'),
+         'scale': 2
+     },
+     'real_esrgan_x4plus':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x4plus.pth',
+         'path': resolve_relative_path('../.assets/models/real_esrgan_x4plus.pth'),
+         'scale': 4
+     },
+     'real_esrnet_x4plus':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrnet_x4plus.pth',
+         'path': resolve_relative_path('../.assets/models/real_esrnet_x4plus.pth'),
+         'scale': 4
+     }
+ }
+ OPTIONS : Optional[OptionsWithModel] = None
+
+
+ def get_frame_processor() -> Any:
+     global FRAME_PROCESSOR
+
+     with THREAD_LOCK:
+         if FRAME_PROCESSOR is None:
+             model_path = get_options('model').get('path')
+             model_scale = get_options('model').get('scale')
+             FRAME_PROCESSOR = RealESRGANer(
+                 model_path = model_path,
+                 model = RRDBNet(
+                     num_in_ch = 3,
+                     num_out_ch = 3,
+                     scale = model_scale
+                 ),
+                 device = map_torch_backend(facefusion.globals.execution_providers),
+                 scale = model_scale
+             )
+     return FRAME_PROCESSOR
+
+
+ def clear_frame_processor() -> None:
+     global FRAME_PROCESSOR
+
+     FRAME_PROCESSOR = None
+
+
+ def get_options(key : Literal['model']) -> Any:
+     global OPTIONS
+
+     if OPTIONS is None:
+         OPTIONS =\
+         {
+             'model': MODELS[frame_processors_globals.frame_enhancer_model]
+         }
+     return OPTIONS.get(key)
+
+
+ def set_options(key : Literal['model'], value : Any) -> None:
+     global OPTIONS
+
+     OPTIONS[key] = value
+
+
+ def register_args(program : ArgumentParser) -> None:
+     program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), default = config.get_str_value('frame_processors.frame_enhancer_model', 'real_esrgan_x2plus'), choices = frame_processors_choices.frame_enhancer_models)
+     program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = config.get_int_value('frame_processors.frame_enhancer_blend', '80'), choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
+
+
+ def apply_args(program : ArgumentParser) -> None:
+     args = program.parse_args()
+     frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
+     frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend
+
+
+ def pre_check() -> bool:
+     if not facefusion.globals.skip_download:
+         download_directory_path = resolve_relative_path('../.assets/models')
+         model_url = get_options('model').get('url')
+         conditional_download(download_directory_path, [ model_url ])
+     return True
+
+
+ def post_check() -> bool:
+     model_url = get_options('model').get('url')
+     model_path = get_options('model').get('path')
+     if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+         logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+         return False
+     elif not is_file(model_path):
+         logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+         return False
+     return True
+
+
+ def pre_process(mode : ProcessMode) -> bool:
+     if mode == 'output' and not facefusion.globals.output_path:
+         logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+         return False
+     return True
+
+
+ def post_process() -> None:
+     if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
+         clear_frame_processor()
+         read_static_image.cache_clear()
+     if facefusion.globals.video_memory_strategy == 'strict':
+         clear_face_analyser()
+         clear_content_analyser()
+
+
+ def enhance_frame(temp_frame : Frame) -> Frame:
+     with THREAD_SEMAPHORE:
+         paste_frame, _ = get_frame_processor().enhance(temp_frame)
+         temp_frame = blend_frame(temp_frame, paste_frame)
+     return temp_frame
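+
+
+ # A frame_enhancer_blend of 100 keeps only the upscaled frame and 0 keeps only
+ # the original; the original is resized to the upscaled resolution first so
+ # cv2.addWeighted receives frames of identical shape.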
+ def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
+     frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
+     paste_frame_height, paste_frame_width = paste_frame.shape[0:2]
+     temp_frame = cv2.resize(temp_frame, (paste_frame_width, paste_frame_height))
+     temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0)
+     return temp_frame
+
+
+ def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+     pass
+
+
+ def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+     return enhance_frame(temp_frame)
+
+
+ def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+     for temp_frame_path in temp_frame_paths:
+         temp_frame = read_image(temp_frame_path)
+         result_frame = process_frame(None, None, temp_frame)
+         write_image(temp_frame_path, result_frame)
+         update_progress()
+
+
+ def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
+     target_frame = read_static_image(target_path)
+     result = process_frame(None, None, target_frame)
+     write_image(output_path, result)
+
+
+ def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
+     frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
facefusion/processors/frame/typings.py ADDED
@@ -0,0 +1,6 @@
+ from typing import Literal
+
+ FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
+ FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
+ FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
+ FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score', 'distance']
facefusion/typing.py ADDED
@@ -0,0 +1,58 @@
+ from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
+ from collections import namedtuple
+ import numpy
+
+ Bbox = numpy.ndarray[Any, Any]
+ Kps = numpy.ndarray[Any, Any]
+ Score = float
+ Embedding = numpy.ndarray[Any, Any]
+ Face = namedtuple('Face',
+ [
+     'bbox',
+     'kps',
+     'score',
+     'embedding',
+     'normed_embedding',
+     'gender',
+     'age'
+ ])
+ FaceSet = Dict[str, List[Face]]
+ FaceStore = TypedDict('FaceStore',
+ {
+     'static_faces' : FaceSet,
+     'reference_faces' : FaceSet
+ })
+ Frame = numpy.ndarray[Any, Any]
+ Mask = numpy.ndarray[Any, Any]
+ Matrix = numpy.ndarray[Any, Any]
+
+ Fps = float
+ Padding = Tuple[int, int, int, int]
+ Resolution = Tuple[int, int]
+
+ Update_Process = Callable[[], None]
+ Process_Frames = Callable[[List[str], List[str], Update_Process], None]
+
+ Template = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512']
+ ProcessMode = Literal['output', 'preview', 'stream']
+
+ LogLevel = Literal['error', 'warn', 'info', 'debug']
+ VideoMemoryStrategy = Literal['strict', 'moderate', 'tolerant']
+ FaceSelectorMode = Literal['reference', 'one', 'many']
+ FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
+ FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
+ FaceAnalyserGender = Literal['male', 'female']
+ FaceDetectorModel = Literal['retinaface', 'yunet']
+ FaceRecognizerModel = Literal['arcface_blendswap', 'arcface_inswapper', 'arcface_simswap']
+ FaceMaskType = Literal['box', 'occlusion', 'region']
+ FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip']
+ TempFrameFormat = Literal['jpg', 'png', 'bmp']
+ OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
+ OutputVideoPreset = Literal['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow']
+
+ ModelValue = Dict[str, Any]
+ ModelSet = Dict[str, ModelValue]
+ OptionsWithModel = TypedDict('OptionsWithModel',
+ {
+     'model' : ModelValue
+ })
facefusion/uis/__init__.py ADDED
File without changes
facefusion/uis/assets/fixes.css ADDED
@@ -0,0 +1,7 @@
+ :root:root:root button:not([class])
+ {
+     border-radius: 0.375rem;
+     float: left;
+     overflow: hidden;
+     width: 100%;
+ }
facefusion/uis/assets/overrides.css ADDED
@@ -0,0 +1,44 @@
+ :root:root:root input[type="number"]
+ {
+     max-width: 6rem;
+ }
+
+ :root:root:root [type="checkbox"],
+ :root:root:root [type="radio"]
+ {
+     border-radius: 50%;
+     height: 1.125rem;
+     width: 1.125rem;
+ }
+
+ :root:root:root input[type="range"]
+ {
+     height: 0.5rem;
+ }
+
+ :root:root:root input[type="range"]::-moz-range-thumb,
+ :root:root:root input[type="range"]::-webkit-slider-thumb
+ {
+     background: var(--neutral-300);
+     border: unset;
+     border-radius: 50%;
+     height: 1.125rem;
+     width: 1.125rem;
+ }
+
+ :root:root:root input[type="range"]::-webkit-slider-thumb
+ {
+     margin-top: 0.375rem;
+ }
+
+ :root:root:root .grid-wrap.fixed-height
+ {
+     min-height: unset;
+ }
+
+ :root:root:root .grid-container
+ {
+     grid-auto-rows: minmax(5em, 1fr);
+     grid-template-columns: repeat(var(--grid-cols), minmax(5em, 1fr));
+     grid-template-rows: repeat(var(--grid-rows), minmax(5em, 1fr));
+ }
facefusion/uis/choices.py ADDED
@@ -0,0 +1,7 @@
+ from typing import List
+
+ from facefusion.uis.typing import WebcamMode
+
+ common_options : List[str] = [ 'keep-temp', 'skip-audio', 'skip-download' ]
+ webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
+ webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]
facefusion/uis/components/__init__.py ADDED
File without changes
facefusion/uis/components/about.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Optional
+ import gradio
+
+ from facefusion import metadata, wording
+
+ ABOUT_BUTTON : Optional[gradio.Button] = None
+ DONATE_BUTTON : Optional[gradio.Button] = None
+
+
+ def render() -> None:
+     global ABOUT_BUTTON
+     global DONATE_BUTTON
+
+     ABOUT_BUTTON = gradio.Button(
+         value = metadata.get('name') + ' ' + metadata.get('version'),
+         variant = 'primary',
+         link = metadata.get('url')
+     )
+     DONATE_BUTTON = gradio.Button(
+         value = wording.get('donate_button_label'),
+         link = 'https://donate.facefusion.io',
+         size = 'sm'
+     )
facefusion/uis/components/benchmark.py ADDED
@@ -0,0 +1,137 @@
+ from typing import Any, Optional, List, Dict, Generator
+ import time
+ import tempfile
+ import statistics
+ import gradio
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.face_store import clear_static_faces
+ from facefusion.processors.frame.core import get_frame_processors_modules
+ from facefusion.vision import count_video_frame_total, detect_video_resolution, detect_video_fps, pack_resolution
+ from facefusion.core import conditional_process
+ from facefusion.memory import limit_system_memory
+ from facefusion.normalizer import normalize_output_path
+ from facefusion.filesystem import clear_temp
+ from facefusion.uis.core import get_ui_component
+
+ BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
+ BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
+ BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
+ BENCHMARKS : Dict[str, str] =\
+ {
+     '240p': '.assets/examples/target-240p.mp4',
+     '360p': '.assets/examples/target-360p.mp4',
+     '540p': '.assets/examples/target-540p.mp4',
+     '720p': '.assets/examples/target-720p.mp4',
+     '1080p': '.assets/examples/target-1080p.mp4',
+     '1440p': '.assets/examples/target-1440p.mp4',
+     '2160p': '.assets/examples/target-2160p.mp4'
+ }
+
+
+ def render() -> None:
+     global BENCHMARK_RESULTS_DATAFRAME
+     global BENCHMARK_START_BUTTON
+     global BENCHMARK_CLEAR_BUTTON
+
+     BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe(
+         label = wording.get('benchmark_results_dataframe_label'),
+         headers =
+         [
+             'target_path',
+             'benchmark_cycles',
+             'average_run',
+             'fastest_run',
+             'slowest_run',
+             'relative_fps'
+         ],
+         datatype =
+         [
+             'str',
+             'number',
+             'number',
+             'number',
+             'number',
+             'number'
+         ]
+     )
+     BENCHMARK_START_BUTTON = gradio.Button(
+         value = wording.get('start_button_label'),
+         variant = 'primary',
+         size = 'sm'
+     )
+     BENCHMARK_CLEAR_BUTTON = gradio.Button(
+         value = wording.get('clear_button_label'),
+         size = 'sm'
+     )
+
+
+ def listen() -> None:
+     benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group')
+     benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider')
+     if benchmark_runs_checkbox_group and benchmark_cycles_slider:
+         BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME)
+     BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME)
+
+
+ def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
+     facefusion.globals.source_paths = [ '.assets/examples/source.jpg' ]
+     facefusion.globals.temp_frame_format = 'bmp'
+     facefusion.globals.output_video_preset = 'ultrafast'
+     target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
+     benchmark_results = []
+     if target_paths:
+         pre_process()
+         for target_path in target_paths:
+             benchmark_results.append(benchmark(target_path, benchmark_cycles))
+             yield benchmark_results
+         post_process()
+
+
+ def pre_process() -> None:
+     if facefusion.globals.system_memory_limit > 0:
+         limit_system_memory(facefusion.globals.system_memory_limit)
+     for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+         frame_processor_module.get_frame_processor()
+
+
+ def post_process() -> None:
+     clear_static_faces()
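+
+
+ # Each cycle re-runs the whole pipeline on the target video; relative_fps is the
+ # mean of the per-cycle frames-per-second, not the fps of a single best run.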
+ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
+     process_times = []
+     total_fps = 0.0
+     for index in range(benchmark_cycles):
+         facefusion.globals.target_path = target_path
+         facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, tempfile.gettempdir())
+         target_video_resolution = detect_video_resolution(facefusion.globals.target_path)
+         facefusion.globals.output_video_resolution = pack_resolution(target_video_resolution)
+         facefusion.globals.output_video_fps = detect_video_fps(facefusion.globals.target_path)
+         video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+         start_time = time.perf_counter()
+         conditional_process()
+         end_time = time.perf_counter()
+         process_time = end_time - start_time
+         total_fps += video_frame_total / process_time
+         process_times.append(process_time)
+     average_run = round(statistics.mean(process_times), 2)
+     fastest_run = round(min(process_times), 2)
+     slowest_run = round(max(process_times), 2)
+     relative_fps = round(total_fps / benchmark_cycles, 2)
+     return\
+     [
+         facefusion.globals.target_path,
+         benchmark_cycles,
+         average_run,
+         fastest_run,
+         slowest_run,
+         relative_fps
+     ]
+
+
+ def clear() -> gradio.Dataframe:
+     if facefusion.globals.target_path:
+         clear_temp(facefusion.globals.target_path)
+     return gradio.Dataframe(value = None)
facefusion/uis/components/benchmark_options.py ADDED
@@ -0,0 +1,29 @@
+ from typing import Optional
+ import gradio
+
+ from facefusion import wording
+ from facefusion.uis.core import register_ui_component
+ from facefusion.uis.components.benchmark import BENCHMARKS
+
+ BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+ BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
+
+
+ def render() -> None:
+     global BENCHMARK_RUNS_CHECKBOX_GROUP
+     global BENCHMARK_CYCLES_SLIDER
+
+     BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('benchmark_runs_checkbox_group_label'),
+         value = list(BENCHMARKS.keys()),
+         choices = list(BENCHMARKS.keys())
+     )
+     BENCHMARK_CYCLES_SLIDER = gradio.Slider(
+         label = wording.get('benchmark_cycles_slider_label'),
+         value = 5,
+         step = 1,
+         minimum = 1,
+         maximum = 10
+     )
+     register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP)
+     register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER)
facefusion/uis/components/common_options.py ADDED
@@ -0,0 +1,35 @@
+ from typing import Optional, List
+ import gradio
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.uis import choices as uis_choices
+
+ COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+ def render() -> None:
+     global COMMON_OPTIONS_CHECKBOX_GROUP
+
+     value = []
+     if facefusion.globals.keep_temp:
+         value.append('keep-temp')
+     if facefusion.globals.skip_audio:
+         value.append('skip-audio')
+     if facefusion.globals.skip_download:
+         value.append('skip-download')
+     COMMON_OPTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('common_options_checkbox_group_label'),
+         choices = uis_choices.common_options,
+         value = value
+     )
+
+
+ def listen() -> None:
+     COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP)
+
+
+ def update(common_options : List[str]) -> None:
+     facefusion.globals.keep_temp = 'keep-temp' in common_options
+     facefusion.globals.skip_audio = 'skip-audio' in common_options
+     facefusion.globals.skip_download = 'skip-download' in common_options
facefusion/uis/components/execution.py ADDED
@@ -0,0 +1,34 @@
+ from typing import List, Optional
+ import gradio
+ import onnxruntime
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.face_analyser import clear_face_analyser
+ from facefusion.processors.frame.core import clear_frame_processors_modules
+ from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
+
+ EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+ def render() -> None:
+     global EXECUTION_PROVIDERS_CHECKBOX_GROUP
+
+     EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('execution_providers_checkbox_group_label'),
+         choices = encode_execution_providers(onnxruntime.get_available_providers()),
+         value = encode_execution_providers(facefusion.globals.execution_providers)
+     )
+
+
+ def listen() -> None:
+     EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
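+
+
+ # Clearing the face analyser and the frame processor modules drops their cached
+ # ONNX sessions, so they are rebuilt with the newly selected execution providers
+ # on next use.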
+ def update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup:
+     clear_face_analyser()
+     clear_frame_processors_modules()
+     if not execution_providers:
+         execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
+     facefusion.globals.execution_providers = decode_execution_providers(execution_providers)
+     return gradio.CheckboxGroup(value = execution_providers)
facefusion/uis/components/execution_queue_count.py ADDED
@@ -0,0 +1,28 @@
+ from typing import Optional
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion import wording
+
+ EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+ def render() -> None:
+     global EXECUTION_QUEUE_COUNT_SLIDER
+
+     EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
+         label = wording.get('execution_queue_count_slider_label'),
+         value = facefusion.globals.execution_queue_count,
+         step = facefusion.choices.execution_queue_count_range[1] - facefusion.choices.execution_queue_count_range[0],
+         minimum = facefusion.choices.execution_queue_count_range[0],
+         maximum = facefusion.choices.execution_queue_count_range[-1]
+     )
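+
+
+ # The slider step is derived from the spacing of the first two values in
+ # execution_queue_count_range, so the range is assumed to be evenly spaced.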
+ def listen() -> None:
+     EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)
+
+
+ def update_execution_queue_count(execution_queue_count : int = 1) -> None:
+     facefusion.globals.execution_queue_count = execution_queue_count
facefusion/uis/components/execution_thread_count.py ADDED
@@ -0,0 +1,29 @@
+ from typing import Optional
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion import wording
+
+ EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+ def render() -> None:
+     global EXECUTION_THREAD_COUNT_SLIDER
+
+     EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
+         label = wording.get('execution_thread_count_slider_label'),
+         value = facefusion.globals.execution_thread_count,
+         step = facefusion.choices.execution_thread_count_range[1] - facefusion.choices.execution_thread_count_range[0],
+         minimum = facefusion.choices.execution_thread_count_range[0],
+         maximum = facefusion.choices.execution_thread_count_range[-1]
+     )
+
+
+ def listen() -> None:
+     EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
+
+
+ def update_execution_thread_count(execution_thread_count : int = 1) -> None:
+     facefusion.globals.execution_thread_count = execution_thread_count
+
facefusion/uis/components/face_analyser.py ADDED
@@ -0,0 +1,98 @@
+ from typing import Optional
+
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion import wording
+ from facefusion.typing import FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel
+ from facefusion.uis.core import register_ui_component
+
+ FACE_ANALYSER_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
+ FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
+ FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
+ FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
+ FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None
+ FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+
+
+ def render() -> None:
+     global FACE_ANALYSER_ORDER_DROPDOWN
+     global FACE_ANALYSER_AGE_DROPDOWN
+     global FACE_ANALYSER_GENDER_DROPDOWN
+     global FACE_DETECTOR_SIZE_DROPDOWN
+     global FACE_DETECTOR_SCORE_SLIDER
+     global FACE_DETECTOR_MODEL_DROPDOWN
+
+     with gradio.Row():
+         FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown(
+             label = wording.get('face_analyser_order_dropdown_label'),
+             choices = facefusion.choices.face_analyser_orders,
+             value = facefusion.globals.face_analyser_order
+         )
+         FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
+             label = wording.get('face_analyser_age_dropdown_label'),
+             choices = [ 'none' ] + facefusion.choices.face_analyser_ages,
+             value = facefusion.globals.face_analyser_age or 'none'
+         )
+         FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
+             label = wording.get('face_analyser_gender_dropdown_label'),
+             choices = [ 'none' ] + facefusion.choices.face_analyser_genders,
+             value = facefusion.globals.face_analyser_gender or 'none'
+         )
+     FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
+         label = wording.get('face_detector_model_dropdown_label'),
+         choices = facefusion.choices.face_detector_models,
+         value = facefusion.globals.face_detector_model
+     )
+     FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(
+         label = wording.get('face_detector_size_dropdown_label'),
+         choices = facefusion.choices.face_detector_sizes,
+         value = facefusion.globals.face_detector_size
+     )
+     FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
+         label = wording.get('face_detector_score_slider_label'),
+         value = facefusion.globals.face_detector_score,
+         step = facefusion.choices.face_detector_score_range[1] - facefusion.choices.face_detector_score_range[0],
+         minimum = facefusion.choices.face_detector_score_range[0],
+         maximum = facefusion.choices.face_detector_score_range[-1]
+     )
+     register_ui_component('face_analyser_order_dropdown', FACE_ANALYSER_ORDER_DROPDOWN)
+     register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
+     register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
+     register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN)
+     register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN)
+     register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER)
+
+
+ def listen() -> None:
+     FACE_ANALYSER_ORDER_DROPDOWN.change(update_face_analyser_order, inputs = FACE_ANALYSER_ORDER_DROPDOWN)
+     FACE_ANALYSER_AGE_DROPDOWN.change(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN)
+     FACE_ANALYSER_GENDER_DROPDOWN.change(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN)
+     FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN)
+     FACE_DETECTOR_SIZE_DROPDOWN.change(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN)
+     FACE_DETECTOR_SCORE_SLIDER.change(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER)
+
+
+ def update_face_analyser_order(face_analyser_order : FaceAnalyserOrder) -> None:
+     facefusion.globals.face_analyser_order = face_analyser_order if face_analyser_order != 'none' else None
+
+
+ def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None:
+     facefusion.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None
+
+
+ def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None:
+     facefusion.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None
+
+
+ def update_face_detector_model(face_detector_model : FaceDetectorModel) -> None:
+     facefusion.globals.face_detector_model = face_detector_model
+
+
+ def update_face_detector_size(face_detector_size : str) -> None:
+     facefusion.globals.face_detector_size = face_detector_size
+
+
+ def update_face_detector_score(face_detector_score : float) -> None:
+     facefusion.globals.face_detector_score = face_detector_score
facefusion/uis/components/face_masker.py ADDED
@@ -0,0 +1,123 @@
+ from typing import Optional, Tuple, List
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion import wording
+ from facefusion.typing import FaceMaskType, FaceMaskRegion
+ from facefusion.uis.core import register_ui_component
+
+ FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+ FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None
+ FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None
+ FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None
+ FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None
+ FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None
+ FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None
+ FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None
+ FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+ def render() -> None:
+     global FACE_MASK_TYPES_CHECKBOX_GROUP
+     global FACE_MASK_BLUR_SLIDER
+     global FACE_MASK_BOX_GROUP
+     global FACE_MASK_REGION_GROUP
+     global FACE_MASK_PADDING_TOP_SLIDER
+     global FACE_MASK_PADDING_RIGHT_SLIDER
+     global FACE_MASK_PADDING_BOTTOM_SLIDER
+     global FACE_MASK_PADDING_LEFT_SLIDER
+     global FACE_MASK_REGION_CHECKBOX_GROUP
+
+     has_box_mask = 'box' in facefusion.globals.face_mask_types
+     has_region_mask = 'region' in facefusion.globals.face_mask_types
+     FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('face_mask_types_checkbox_group_label'),
+         choices = facefusion.choices.face_mask_types,
+         value = facefusion.globals.face_mask_types
+     )
+     with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP:
+         FACE_MASK_BLUR_SLIDER = gradio.Slider(
+             label = wording.get('face_mask_blur_slider_label'),
+             step = facefusion.choices.face_mask_blur_range[1] - facefusion.choices.face_mask_blur_range[0],
+             minimum = facefusion.choices.face_mask_blur_range[0],
+             maximum = facefusion.choices.face_mask_blur_range[-1],
+             value = facefusion.globals.face_mask_blur
+         )
+         with gradio.Row():
+             FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider(
+                 label = wording.get('face_mask_padding_top_slider_label'),
+                 step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+                 minimum = facefusion.choices.face_mask_padding_range[0],
+                 maximum = facefusion.choices.face_mask_padding_range[-1],
+                 value = facefusion.globals.face_mask_padding[0]
+             )
+             FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider(
+                 label = wording.get('face_mask_padding_right_slider_label'),
+                 step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+                 minimum = facefusion.choices.face_mask_padding_range[0],
+                 maximum = facefusion.choices.face_mask_padding_range[-1],
+                 value = facefusion.globals.face_mask_padding[1]
+             )
+         with gradio.Row():
+             FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider(
+                 label = wording.get('face_mask_padding_bottom_slider_label'),
+                 step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+                 minimum = facefusion.choices.face_mask_padding_range[0],
+                 maximum = facefusion.choices.face_mask_padding_range[-1],
+                 value = facefusion.globals.face_mask_padding[2]
+             )
+             FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider(
+                 label = wording.get('face_mask_padding_left_slider_label'),
+                 step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+                 minimum = facefusion.choices.face_mask_padding_range[0],
+                 maximum = facefusion.choices.face_mask_padding_range[-1],
+                 value = facefusion.globals.face_mask_padding[3]
+             )
+     with gradio.Row():
+         FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup(
+             label = wording.get('face_mask_region_checkbox_group_label'),
+             choices = facefusion.choices.face_mask_regions,
+             value = facefusion.globals.face_mask_regions,
+             visible = has_region_mask
+         )
+     register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP)
+     register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER)
+     register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER)
+     register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER)
+     register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER)
+     register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER)
+     register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP)
+
+
+ def listen() -> None:
+     FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ])
+     FACE_MASK_BLUR_SLIDER.change(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
+     FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP)
+     face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]
+     for face_mask_padding_slider in face_mask_padding_sliders:
+         face_mask_padding_slider.change(update_face_mask_padding, inputs = face_mask_padding_sliders)
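+
+
+ # If the user unticks every mask type or region, the update handlers below fall
+ # back to the full set of choices, so at least one option is always active.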
+ def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]:
+     if not face_mask_types:
+         face_mask_types = facefusion.choices.face_mask_types
+     facefusion.globals.face_mask_types = face_mask_types
+     has_box_mask = 'box' in face_mask_types
+     has_region_mask = 'region' in face_mask_types
+     return gradio.CheckboxGroup(value = face_mask_types), gradio.Group(visible = has_box_mask), gradio.CheckboxGroup(visible = has_region_mask)
+
+
+ def update_face_mask_blur(face_mask_blur : float) -> None:
+     facefusion.globals.face_mask_blur = face_mask_blur
+
+
+ def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None:
+     facefusion.globals.face_mask_padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left)
+
+
+ def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup:
+     if not face_mask_regions:
+         face_mask_regions = facefusion.choices.face_mask_regions
+     facefusion.globals.face_mask_regions = face_mask_regions
+     return gradio.CheckboxGroup(value = face_mask_regions)
facefusion/uis/components/face_selector.py ADDED
@@ -0,0 +1,164 @@
1
+ from typing import List, Optional, Tuple, Any, Dict
2
+
3
+ import gradio
4
+
5
+ import facefusion.globals
6
+ import facefusion.choices
7
+ from facefusion import wording
8
+ from facefusion.face_store import clear_static_faces, clear_reference_faces
9
+ from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color
10
+ from facefusion.filesystem import is_image, is_video
11
+ from facefusion.face_analyser import get_many_faces
12
+ from facefusion.typing import Frame, FaceSelectorMode
13
+ from facefusion.uis.core import get_ui_component, register_ui_component
14
+ from facefusion.uis.typing import ComponentName
15
+
16
+ FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
17
+ REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
18
+ REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
19
+
20
+
21
+ def render() -> None:
22
+ global FACE_SELECTOR_MODE_DROPDOWN
23
+ global REFERENCE_FACE_POSITION_GALLERY
24
+ global REFERENCE_FACE_DISTANCE_SLIDER
25
+
26
+     reference_face_gallery_args : Dict[str, Any] =\
+     {
+         'label': wording.get('reference_face_gallery_label'),
+         'object_fit': 'cover',
+         'columns': 8,
+         'allow_preview': False,
+         'visible': 'reference' in facefusion.globals.face_selector_mode
+     }
+     if is_image(facefusion.globals.target_path):
+         reference_frame = read_static_image(facefusion.globals.target_path)
+         reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
+     if is_video(facefusion.globals.target_path):
+         reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+         reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
+     FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
+         label = wording.get('face_selector_mode_dropdown_label'),
+         choices = facefusion.choices.face_selector_modes,
+         value = facefusion.globals.face_selector_mode
+     )
+     REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
+     REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
+         label = wording.get('reference_face_distance_slider_label'),
+         value = facefusion.globals.reference_face_distance,
+         step = facefusion.choices.reference_face_distance_range[1] - facefusion.choices.reference_face_distance_range[0],
+         minimum = facefusion.choices.reference_face_distance_range[0],
+         maximum = facefusion.choices.reference_face_distance_range[-1],
+         visible = 'reference' in facefusion.globals.face_selector_mode
+     )
+     register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
+     register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
+     register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
+
+
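+ # the reference gallery has to stay in sync with everything that can change the
+ # detected faces: the target media, the analyser filters and the detector settings;
+ # detector changes also invalidate the cached faces, so those clear the store first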
+ def listen() -> None:
+     FACE_SELECTOR_MODE_DROPDOWN.change(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
+     REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
+     REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
+     multi_component_names : List[ComponentName] =\
+     [
+         'target_image',
+         'target_video'
+     ]
+     for component_name in multi_component_names:
+         component = get_ui_component(component_name)
+         if component:
+             for method in [ 'upload', 'change', 'clear' ]:
+                 getattr(component, method)(update_reference_face_position)
+                 getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+     change_one_component_names : List[ComponentName] =\
+     [
+         'face_analyser_order_dropdown',
+         'face_analyser_age_dropdown',
+         'face_analyser_gender_dropdown'
+     ]
+     for component_name in change_one_component_names:
+         component = get_ui_component(component_name)
+         if component:
+             component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+     change_two_component_names : List[ComponentName] =\
+     [
+         'face_detector_model_dropdown',
+         'face_detector_size_dropdown',
+         'face_detector_score_slider'
+     ]
+     for component_name in change_two_component_names:
+         component = get_ui_component(component_name)
+         if component:
+             component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+     preview_frame_slider = get_ui_component('preview_frame_slider')
+     if preview_frame_slider:
+         preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)
+         preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+
+
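+ # the reference gallery and distance slider are only meaningful in 'reference'
+ # mode, so the 'one' and 'many' modes hide them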
+ def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
+     if face_selector_mode == 'reference':
+         facefusion.globals.face_selector_mode = face_selector_mode
+         return gradio.Gallery(visible = True), gradio.Slider(visible = True)
+     if face_selector_mode == 'one':
+         facefusion.globals.face_selector_mode = face_selector_mode
+         return gradio.Gallery(visible = False), gradio.Slider(visible = False)
+     if face_selector_mode == 'many':
+         facefusion.globals.face_selector_mode = face_selector_mode
+         return gradio.Gallery(visible = False), gradio.Slider(visible = False)
+
+
+ def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
+     clear_reference_faces()
+     clear_static_faces()
+     update_reference_face_position(event.index)
+     return update_reference_position_gallery()
+
+
+ def update_reference_face_position(reference_face_position : int = 0) -> None:
+     facefusion.globals.reference_face_position = reference_face_position
+
+
+ def update_reference_face_distance(reference_face_distance : float) -> None:
+     facefusion.globals.reference_face_distance = reference_face_distance
+
+
+ def update_reference_frame_number(reference_frame_number : int) -> None:
+     facefusion.globals.reference_frame_number = reference_frame_number
+
+
+ def clear_and_update_reference_position_gallery() -> gradio.Gallery:
+     clear_reference_faces()
+     clear_static_faces()
+     return update_reference_position_gallery()
+
+
+ def update_reference_position_gallery() -> gradio.Gallery:
+     gallery_frames = []
+     if is_image(facefusion.globals.target_path):
+         reference_frame = read_static_image(facefusion.globals.target_path)
+         gallery_frames = extract_gallery_frames(reference_frame)
+     if is_video(facefusion.globals.target_path):
+         reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+         gallery_frames = extract_gallery_frames(reference_frame)
+     if gallery_frames:
+         return gradio.Gallery(value = gallery_frames)
+     return gradio.Gallery(value = None)
+
+
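+ # crop every detected face with 25% padding around its bounding box so the
+ # gallery thumbnails keep some context; numpy slicing clamps padded coordinates
+ # that overshoot the frame on the bottom and right edges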
+ def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
+     crop_frames = []
+     faces = get_many_faces(reference_frame)
+     for face in faces:
+         start_x, start_y, end_x, end_y = map(int, face.bbox)
+         padding_x = int((end_x - start_x) * 0.25)
+         padding_y = int((end_y - start_y) * 0.25)
+         start_x = max(0, start_x - padding_x)
+         start_y = max(0, start_y - padding_y)
+         end_x = max(0, end_x + padding_x)
+         end_y = max(0, end_y + padding_y)
+         crop_frame = reference_frame[start_y:end_y, start_x:end_x]
+         crop_frame = normalize_frame_color(crop_frame)
+         crop_frames.append(crop_frame)
+     return crop_frames
facefusion/uis/components/frame_processors.py ADDED
@@ -0,0 +1,40 @@
+ from typing import List, Optional
+ import gradio
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
+ from facefusion.filesystem import list_directory
+ from facefusion.uis.core import register_ui_component
+
+ FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+ def render() -> None:
+     global FRAME_PROCESSORS_CHECKBOX_GROUP
+
+     FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('frame_processors_checkbox_group_label'),
+         choices = sort_frame_processors(facefusion.globals.frame_processors),
+         value = facefusion.globals.frame_processors
+     )
+     register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
+
+
+ def listen() -> None:
+     FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP)
+
+
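+ # persist the selection and reload each selected module from scratch, bailing
+ # out early if any module fails its pre-check (for example a missing model file)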
+ def update_frame_processors(frame_processors : List[str]) -> gradio.CheckboxGroup:
+     facefusion.globals.frame_processors = frame_processors
+     clear_frame_processors_modules()
+     for frame_processor in frame_processors:
+         frame_processor_module = load_frame_processor_module(frame_processor)
+         if not frame_processor_module.pre_check():
+             return gradio.CheckboxGroup()
+     return gradio.CheckboxGroup(value = frame_processors, choices = sort_frame_processors(frame_processors))
+
+
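+ # list every available processor module, placing the selected ones first in
+ # their chosen order; sorted() is stable, so the rest keep their directory order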
+ def sort_frame_processors(frame_processors : List[str]) -> List[str]:
+     available_frame_processors = list_directory('facefusion/processors/frame/modules')
+     return sorted(available_frame_processors, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors))
facefusion/uis/components/frame_processors_options.py ADDED
@@ -0,0 +1,141 @@
+ from typing import List, Optional, Tuple
+ import gradio
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.processors.frame.core import load_frame_processor_module
+ from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
+ from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
+ from facefusion.uis.core import get_ui_component, register_ui_component
+
+ FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+ FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+ FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
+ FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+ FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
+ FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
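+ # every per-processor option is rendered up front; selecting or deselecting a
+ # frame processor only toggles the visibility of its options (see toggle_face_swapper_model)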
+ def render() -> None:
+     global FACE_SWAPPER_MODEL_DROPDOWN
+     global FACE_ENHANCER_MODEL_DROPDOWN
+     global FACE_ENHANCER_BLEND_SLIDER
+     global FRAME_ENHANCER_MODEL_DROPDOWN
+     global FRAME_ENHANCER_BLEND_SLIDER
+     global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP
+
+     FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
+         label = wording.get('face_swapper_model_dropdown_label'),
+         choices = frame_processors_choices.face_swapper_models,
+         value = frame_processors_globals.face_swapper_model,
+         visible = 'face_swapper' in facefusion.globals.frame_processors
+     )
+     FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
+         label = wording.get('face_enhancer_model_dropdown_label'),
+         choices = frame_processors_choices.face_enhancer_models,
+         value = frame_processors_globals.face_enhancer_model,
+         visible = 'face_enhancer' in facefusion.globals.frame_processors
+     )
+     FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
+         label = wording.get('face_enhancer_blend_slider_label'),
+         value = frame_processors_globals.face_enhancer_blend,
+         step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0],
+         minimum = frame_processors_choices.face_enhancer_blend_range[0],
+         maximum = frame_processors_choices.face_enhancer_blend_range[-1],
+         visible = 'face_enhancer' in facefusion.globals.frame_processors
+     )
+     FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
+         label = wording.get('frame_enhancer_model_dropdown_label'),
+         choices = frame_processors_choices.frame_enhancer_models,
+         value = frame_processors_globals.frame_enhancer_model,
+         visible = 'frame_enhancer' in facefusion.globals.frame_processors
+     )
+     FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
+         label = wording.get('frame_enhancer_blend_slider_label'),
+         value = frame_processors_globals.frame_enhancer_blend,
+         step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0],
+         minimum = frame_processors_choices.frame_enhancer_blend_range[0],
+         maximum = frame_processors_choices.frame_enhancer_blend_range[-1],
+         visible = 'frame_enhancer' in facefusion.globals.frame_processors
+     )
+     FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('face_debugger_items_checkbox_group_label'),
+         choices = frame_processors_choices.face_debugger_items,
+         value = frame_processors_globals.face_debugger_items,
+         visible = 'face_debugger' in facefusion.globals.frame_processors
+     )
+
+     register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
+     register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
+     register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
+     register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
+     register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
+     register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
+
+
+ def listen() -> None:
+     FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
+     FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
+     FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
+     FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
+     FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
+     FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
+     frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
+     if frame_processors_checkbox_group:
+         frame_processors_checkbox_group.change(toggle_face_swapper_model, inputs = frame_processors_checkbox_group, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP ])
+
+
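+ # each swapper model produces embeddings in its own space, so the matching
+ # face recognizer model has to be selected alongside it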
+ def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
+     frame_processors_globals.face_swapper_model = face_swapper_model
+     if face_swapper_model == 'blendswap_256':
+         facefusion.globals.face_recognizer_model = 'arcface_blendswap'
+     if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16':
+         facefusion.globals.face_recognizer_model = 'arcface_inswapper'
+     if face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial':
+         facefusion.globals.face_recognizer_model = 'arcface_simswap'
+     face_swapper_module = load_frame_processor_module('face_swapper')
+     face_swapper_module.clear_frame_processor()
+     face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
+     if not face_swapper_module.pre_check():
+         return gradio.Dropdown()
+     return gradio.Dropdown(value = face_swapper_model)
+
+
+ def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown:
+     frame_processors_globals.face_enhancer_model = face_enhancer_model
+     face_enhancer_module = load_frame_processor_module('face_enhancer')
+     face_enhancer_module.clear_frame_processor()
+     face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model])
+     if not face_enhancer_module.pre_check():
+         return gradio.Dropdown()
+     return gradio.Dropdown(value = face_enhancer_model)
+
+
+ def update_face_enhancer_blend(face_enhancer_blend : int) -> None:
+     frame_processors_globals.face_enhancer_blend = face_enhancer_blend
+
+
+ def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
+     frame_processors_globals.frame_enhancer_model = frame_enhancer_model
+     frame_enhancer_module = load_frame_processor_module('frame_enhancer')
+     frame_enhancer_module.clear_frame_processor()
+     frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model])
+     if not frame_enhancer_module.pre_check():
+         return gradio.Dropdown()
+     return gradio.Dropdown(value = frame_enhancer_model)
+
+
+ def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None:
+     frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend
+
+
+ def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
+     frame_processors_globals.face_debugger_items = face_debugger_items
+
+
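+ # despite its name, this toggles the visibility of the options of every frame
+ # processor, not just the face swapper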
+ def toggle_face_swapper_model(frame_processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.CheckboxGroup]:
+     has_face_swapper = 'face_swapper' in frame_processors
+     has_face_enhancer = 'face_enhancer' in frame_processors
+     has_frame_enhancer = 'frame_enhancer' in frame_processors
+     has_face_debugger = 'face_debugger' in frame_processors
+     return gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.CheckboxGroup(visible = has_face_debugger)
facefusion/uis/components/memory.py ADDED
@@ -0,0 +1,41 @@
+ from typing import Optional
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion.typing import VideoMemoryStrategy
+ from facefusion import wording
+
+ VIDEO_MEMORY_STRATEGY : Optional[gradio.Dropdown] = None
+ SYSTEM_MEMORY_LIMIT_SLIDER : Optional[gradio.Slider] = None
+
+
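+ # the slider step is derived from the spacing of the configured range, following
+ # the same convention as the other option components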
+ def render() -> None:
+     global VIDEO_MEMORY_STRATEGY
+     global SYSTEM_MEMORY_LIMIT_SLIDER
+
+     VIDEO_MEMORY_STRATEGY = gradio.Dropdown(
+         label = wording.get('video_memory_strategy_dropdown_label'),
+         choices = facefusion.choices.video_memory_strategies,
+         value = facefusion.globals.video_memory_strategy
+     )
+     SYSTEM_MEMORY_LIMIT_SLIDER = gradio.Slider(
+         label = wording.get('system_memory_limit_slider_label'),
+         step = facefusion.choices.system_memory_limit_range[1] - facefusion.choices.system_memory_limit_range[0],
+         minimum = facefusion.choices.system_memory_limit_range[0],
+         maximum = facefusion.choices.system_memory_limit_range[-1],
+         value = facefusion.globals.system_memory_limit
+     )
+
+
+ def listen() -> None:
+     VIDEO_MEMORY_STRATEGY.change(update_video_memory_strategy, inputs = VIDEO_MEMORY_STRATEGY)
+     SYSTEM_MEMORY_LIMIT_SLIDER.change(update_system_memory_limit, inputs = SYSTEM_MEMORY_LIMIT_SLIDER)
+
+
+ def update_video_memory_strategy(video_memory_strategy : VideoMemoryStrategy) -> None:
+     facefusion.globals.video_memory_strategy = video_memory_strategy
+
+
+ def update_system_memory_limit(system_memory_limit : int) -> None:
+     facefusion.globals.system_memory_limit = system_memory_limit