From 365f3d790ae705ce7af51dc4400d45a180231652 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 09:01:03 +0200 Subject: [PATCH 01/21] Introduce pre_check(), Cleanup requirements.txt --- core/processor.py | 5 +---- requirements.txt | 3 ++- run.py | 40 ++++++++++++++++++++++++++++------------ 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/core/processor.py b/core/processor.py index d67b1cf..82d61c1 100644 --- a/core/processor.py +++ b/core/processor.py @@ -5,10 +5,7 @@ import core.globals from core.config import get_face from core.utils import rreplace -if os.path.isfile('inswapper_128.onnx'): - face_swapper = insightface.model_zoo.get_model('inswapper_128.onnx', providers=core.globals.providers) -else: - quit('File "inswapper_128.onnx" does not exist!') +face_swapper = insightface.model_zoo.get_model('inswapper_128.onnx', providers=core.globals.providers) def process_video(source_img, frame_paths): diff --git a/requirements.txt b/requirements.txt index 14bb741..a38b874 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,5 +4,6 @@ onnx==1.14.0 insightface==0.7.3 psutil==5.9.5 tk==0.1.0 -pillow==9.0.1 +pillow==9.5.0 torch==2.0.1 +onnxruntime-gpu==1.15.0 \ No newline at end of file diff --git a/run.py b/run.py index adde8d9..525d9a5 100644 --- a/run.py +++ b/run.py @@ -1,19 +1,9 @@ #!/usr/bin/env python3 import sys import time +import torch import shutil import core.globals - -if not shutil.which('ffmpeg'): - print('ffmpeg is not installed. Read the docs: https://github.com/s0md3v/roop#installation.\n' * 10) - quit() -if '--gpu' not in sys.argv: - core.globals.providers = ['CPUExecutionProvider'] -elif 'ROCMExecutionProvider' not in core.globals.providers: - import torch - if not torch.cuda.is_available(): - quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.") - import glob import argparse import multiprocessing as mp @@ -45,12 +35,35 @@ parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_fr for name, value in vars(parser.parse_args()).items(): args[name] = value - sep = "/" if os.name == "nt": sep = "\\" +def pre_check(): + if not shutil.which('ffmpeg'): + quit('ffmpeg is not installed!') + if os.path.isfile('../inswapper_128.onnx'): + quit('File "inswapper_128.onnx" does not exist!') + if '--gpu' in sys.argv: + CUDA_VERSION = torch.version.cuda + CUDNN_VERSION = torch.backends.cudnn.version() + + if 'ROCMExecutionProvider' not in core.globals.providers: + if CUDA_VERSION > '11.8': + quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.") + if CUDA_VERSION < '11.6': + quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8.") + if CUDNN_VERSION < 8220: + quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1") + if CUDNN_VERSION > 8910: + quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1") + if not torch.cuda.is_available(): + quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.") + else: + core.globals.providers = ['CPUExecutionProvider'] + + def start_processing(): start_time = time.time() if args['gpu']: @@ -73,6 +86,7 @@ def start_processing(): print(flush=True) print(f"Processing time: {end_time - start_time:.2f} seconds", flush=True) + def preview_image(image_path): img = Image.open(image_path) img = img.resize((180, 180), Image.ANTIALIAS) @@ -183,6 +197,8 @@ def start(): if __name__ == "__main__": global status_label, window + + 
pre_check() if args['source_img']: start() quit() From fe9d6b9cae78a7f6621970069f912489a9f7a231 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 09:05:13 +0200 Subject: [PATCH 02/21] Introduce pre_check(), Cleanup requirements.txt --- run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run.py b/run.py index 525d9a5..035879c 100644 --- a/run.py +++ b/run.py @@ -52,7 +52,7 @@ def pre_check(): if 'ROCMExecutionProvider' not in core.globals.providers: if CUDA_VERSION > '11.8': quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.") - if CUDA_VERSION < '11.6': + if CUDA_VERSION < '11.0': quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8.") if CUDNN_VERSION < 8220: quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1") From 064f5c5144052fc32ef483a8fe9144f6ba92d8d9 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 09:07:12 +0200 Subject: [PATCH 03/21] Introduce pre_check(), Cleanup requirements.txt --- run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run.py b/run.py index 035879c..300daab 100644 --- a/run.py +++ b/run.py @@ -52,7 +52,7 @@ def pre_check(): if 'ROCMExecutionProvider' not in core.globals.providers: if CUDA_VERSION > '11.8': quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.") - if CUDA_VERSION < '11.0': + if CUDA_VERSION < '11.4': quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8.") if CUDNN_VERSION < 8220: quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1") From 7227297b45c7daaadf86fe0ddf6ae04430214646 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 10:15:25 +0200 Subject: [PATCH 04/21] Add resource limitations --- README.md | 23 +++++++++++++++-------- run.py | 17 +++++++++++++++++ 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index b65f4a7..d61d4c4 100644 --- a/README.md +++ b/README.md @@ -25,16 +25,23 @@ Don't touch the FPS checkbox unless you know what you are doing. Additional command line arguments are given below: ``` --h, --help show this help message and exit --f SOURCE_IMG, --face SOURCE_IMG +options: + -h, --help show this help message and exit + -f SOURCE_IMG, --face SOURCE_IMG use this face --t TARGET_PATH, --target TARGET_PATH + -t TARGET_PATH, --target TARGET_PATH replace this face --o OUTPUT_FILE, --output OUTPUT_FILE - save output to this file ---keep-fps keep original fps ---gpu use gpu ---keep-frames don't delete frames directory + -o OUTPUT_FILE, --output OUTPUT_FILE + save output to this file + --keep-fps maintain original fps + --gpu use gpu + --keep-frames keep frames directory + --max-memory MAX_MEMORY + set max memory + --max-cpu-cores MAX_CPU_CORES + set max cpu cores + --max-cpu-usage MAX_CPU_USAGE + set cpu usage in percent ``` Looking for a CLI mode? Using the -f/--face argument will make the program in cli mode. 
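A note on the CUDA bounds tuned in patches 01 to 03: pre_check() compares torch.version.cuda lexically as a string, which happens to give the intended result for the 11.x range used here, but versions with fewer digits sort incorrectly (for example '9.2' compares greater than '11.8'). A minimal numeric comparison could look like the sketch below; the helper name and the exact bounds are assumptions, not part of these patches.

import torch

def parse_cuda_version(value):
    # '11.7' -> (11, 7); ignore any patch component such as '11.7.1'
    return tuple(int(part) for part in value.split('.')[:2])

CUDA_VERSION = torch.version.cuda
if CUDA_VERSION and not ((11, 4) <= parse_cuda_version(CUDA_VERSION) <= (11, 8)):
    quit(f'CUDA version {CUDA_VERSION} is not supported - please install 11.4 up to 11.8.')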
diff --git a/run.py b/run.py index 300daab..9fabf17 100644 --- a/run.py +++ b/run.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +import multiprocessing import sys import time import torch @@ -20,6 +21,7 @@ import psutil import cv2 import threading from PIL import Image, ImageTk +import resource pool = None args = {} @@ -31,6 +33,9 @@ parser.add_argument('-o', '--output', help='save output to this file', dest='out parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) +parser.add_argument('--max-memory', help='set max memory', default=16, type=int) +parser.add_argument('--max-cpu-cores', help='set max cpu cores', default=multiprocessing.cpu_count(), type=int) +parser.add_argument('--max-cpu-usage', help='set cpu usage in percent', default=100, type=int) for name, value in vars(parser.parse_args()).items(): args[name] = value @@ -40,6 +45,16 @@ if os.name == "nt": sep = "\\" +def limit_resources(): + current_cpu_usage, current_cpu_cores = resource.getrlimit(resource.RLIMIT_CPU) + if args['max_memory'] < 1: + resource.setrlimit(resource.RLIMIT_DATA, (args['max_memory'] * 1024 * 1024 * 1024, -1)) + if args['max_cpu_usage'] < 1: + resource.setrlimit(resource.RLIMIT_CPU, (args['max_cpu_usage'], current_cpu_cores)) + if args['max_cpu_cores'] < multiprocessing.cpu_count(): + resource.setrlimit(resource.RLIMIT_CPU, (current_cpu_usage, args['max_cpu_cores'])) + + def pre_check(): if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') @@ -199,6 +214,8 @@ if __name__ == "__main__": global status_label, window pre_check() + limit_resources() + if args['source_img']: start() quit() From 48e1f93891ea2067ef8f9d6f28f64a8349e638c7 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 10:59:23 +0200 Subject: [PATCH 05/21] Revert: CPU limits not working --- README.md | 4 ---- run.py | 12 +++--------- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index d61d4c4..49f7cb7 100644 --- a/README.md +++ b/README.md @@ -38,10 +38,6 @@ options: --keep-frames keep frames directory --max-memory MAX_MEMORY set max memory - --max-cpu-cores MAX_CPU_CORES - set max cpu cores - --max-cpu-usage MAX_CPU_USAGE - set cpu usage in percent ``` Looking for a CLI mode? Using the -f/--face argument will make the program in cli mode. 
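On the revert announced in patch 05 (the matching run.py hunk follows below): resource.RLIMIT_CPU limits total CPU time in seconds, not a core count or a usage percentage, so setrlimit cannot express --max-cpu-cores or --max-cpu-usage. A core cap is more naturally enforced by sizing the worker pool that processes the frames; a sketch under that assumption, with a hypothetical helper name:

import multiprocessing

def create_pool(max_cores):
    # clamp to the machine's logical core count and keep at least one worker
    cores = max(1, min(max_cores, multiprocessing.cpu_count()))
    return multiprocessing.Pool(processes=cores)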
diff --git a/run.py b/run.py index 9fabf17..5d65b7d 100644 --- a/run.py +++ b/run.py @@ -34,8 +34,6 @@ parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) parser.add_argument('--max-memory', help='set max memory', default=16, type=int) -parser.add_argument('--max-cpu-cores', help='set max cpu cores', default=multiprocessing.cpu_count(), type=int) -parser.add_argument('--max-cpu-usage', help='set cpu usage in percent', default=100, type=int) for name, value in vars(parser.parse_args()).items(): args[name] = value @@ -46,13 +44,9 @@ if os.name == "nt": def limit_resources(): - current_cpu_usage, current_cpu_cores = resource.getrlimit(resource.RLIMIT_CPU) - if args['max_memory'] < 1: - resource.setrlimit(resource.RLIMIT_DATA, (args['max_memory'] * 1024 * 1024 * 1024, -1)) - if args['max_cpu_usage'] < 1: - resource.setrlimit(resource.RLIMIT_CPU, (args['max_cpu_usage'], current_cpu_cores)) - if args['max_cpu_cores'] < multiprocessing.cpu_count(): - resource.setrlimit(resource.RLIMIT_CPU, (current_cpu_usage, args['max_cpu_cores'])) + if args['max_memory'] <= 1: + memory = args['max_memory'] * 1024 * 1024 * 1024 + resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) def pre_check(): From 0f3bc95913b2fd4ae9cc9c185705bcad472265e2 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 13:47:44 +0200 Subject: [PATCH 06/21] max-memory support for Windows --- run.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/run.py b/run.py index 5d65b7d..327bee8 100644 --- a/run.py +++ b/run.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -import multiprocessing + +import platform import sys import time import torch @@ -21,7 +22,6 @@ import psutil import cv2 import threading from PIL import Image, ImageTk -import resource pool = None args = {} @@ -44,9 +44,14 @@ if os.name == "nt": def limit_resources(): - if args['max_memory'] <= 1: + if args['max_memory'] >= 1: memory = args['max_memory'] * 1024 * 1024 * 1024 - resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) + if str(platform.system()).lower() == 'linux': + import resource + resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) + if str(platform.system()).lower() == 'windows': + import win32api + win32api.SetProcessWorkingSetSize(-1, memory, memory) def pre_check(): From aa3158576a9b94764b6bc01e6768f770cb617c85 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 13:53:59 +0200 Subject: [PATCH 07/21] max-memory support for Mac --- run.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/run.py b/run.py index 327bee8..af7088c 100644 --- a/run.py +++ b/run.py @@ -46,12 +46,12 @@ if os.name == "nt": def limit_resources(): if args['max_memory'] >= 1: memory = args['max_memory'] * 1024 * 1024 * 1024 - if str(platform.system()).lower() == 'linux': - import resource - resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) if str(platform.system()).lower() == 'windows': import win32api win32api.SetProcessWorkingSetSize(-1, memory, memory) + else: + import resource + resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) def pre_check(): From ff7343c2fa29d6a774f9fb3404ce5384d68991cc Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 15:14:18 +0200 Subject: [PATCH 08/21] We need Python 3.4+ --- run.py | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/run.py b/run.py index af7088c..47ab975 100644 --- a/run.py +++ b/run.py @@ -55,6 +55,8 @@ def limit_resources(): def pre_check(): + if sys.version_info < (3, 4): + quit(f'Python version is not supported - please upgrade to 3.4') if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') if os.path.isfile('../inswapper_128.onnx'): From c8e1a991186c18ac9e1ad60ce52d2f10bf13d46a Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 15:14:42 +0200 Subject: [PATCH 09/21] We need Python 3.4+ --- run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run.py b/run.py index 47ab975..1e8d4f3 100644 --- a/run.py +++ b/run.py @@ -56,7 +56,7 @@ def limit_resources(): def pre_check(): if sys.version_info < (3, 4): - quit(f'Python version is not supported - please upgrade to 3.4') + quit(f'Python version is not supported - please upgrade to 3.4 or higher') if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') if os.path.isfile('../inswapper_128.onnx'): From 1113e4cc83b4a4de4d383ee2ae05527aec6df6cd Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 15:19:47 +0200 Subject: [PATCH 10/21] Use ctypes to allocate memory in Windows --- run.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/run.py b/run.py index 1e8d4f3..70636c1 100644 --- a/run.py +++ b/run.py @@ -47,8 +47,9 @@ def limit_resources(): if args['max_memory'] >= 1: memory = args['max_memory'] * 1024 * 1024 * 1024 if str(platform.system()).lower() == 'windows': - import win32api - win32api.SetProcessWorkingSetSize(-1, memory, memory) + import ctypes + kernel32 = ctypes.windll.kernel32 + kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory)) else: import resource resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) @@ -69,7 +70,7 @@ def pre_check(): if CUDA_VERSION > '11.8': quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.") if CUDA_VERSION < '11.4': - quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8.") + quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8") if CUDNN_VERSION < 8220: quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1") if CUDNN_VERSION > 8910: From 36e3391ca2fd46f1c56683f2f45c27a852e17860 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 15:21:37 +0200 Subject: [PATCH 11/21] Asking for Python 3.8+ --- run.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run.py b/run.py index 70636c1..953805e 100644 --- a/run.py +++ b/run.py @@ -56,8 +56,8 @@ def limit_resources(): def pre_check(): - if sys.version_info < (3, 4): - quit(f'Python version is not supported - please upgrade to 3.4 or higher') + if sys.version_info < (3, 8): + quit(f'Python version is not supported - please upgrade to 3.8 or higher') if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') if os.path.isfile('../inswapper_128.onnx'): From 41c69d318dd041d58f61576e8d860b9967467244 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 16:29:51 +0200 Subject: [PATCH 12/21] Create face analyser and face swapper instance on demand --- core/config.py | 15 +++++++++++---- core/processor.py | 16 +++++++++++----- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/core/config.py b/core/config.py index 151ad33..8a3d908 100644 --- a/core/config.py +++ b/core/config.py @@ -1,13 +1,20 @@ import insightface import core.globals -face_analyser = 
insightface.app.FaceAnalysis(name='buffalo_l', providers=core.globals.providers) -face_analyser.prepare(ctx_id=0, det_size=(640, 640)) +FACE_ANALYSER = None + + +def get_face_analyser(): + global FACE_ANALYSER + if FACE_ANALYSER is None: + FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=core.globals.providers) + FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640)) + return FACE_ANALYSER def get_face(img_data): - analysed = face_analyser.get(img_data) + face = get_face_analyser().get(img_data) try: - return sorted(analysed, key=lambda x: x.bbox[0])[0] + return sorted(face, key=lambda x: x.bbox[0])[0] except IndexError: return None diff --git a/core/processor.py b/core/processor.py index 82d61c1..f09ba57 100644 --- a/core/processor.py +++ b/core/processor.py @@ -1,11 +1,16 @@ -import os import cv2 import insightface -import core.globals from core.config import get_face from core.utils import rreplace -face_swapper = insightface.model_zoo.get_model('inswapper_128.onnx', providers=core.globals.providers) +FACE_SWAPPER = None + + +def get_face_swapper(): + global FACE_SWAPPER + if FACE_SWAPPER is None: + FACE_SWAPPER = insightface.model_zoo.get_model('inswapper_128.onnx') + return FACE_SWAPPER def process_video(source_img, frame_paths): @@ -15,12 +20,13 @@ def process_video(source_img, frame_paths): try: face = get_face(frame) if face: - result = face_swapper.get(frame, face, source_face, paste_back=True) + result = get_face_swapper().get(frame, face, source_face, paste_back=True) cv2.imwrite(frame_path, result) print('.', end='', flush=True) else: print('S', end='', flush=True) except Exception as e: + print(e, flush=True) print('E', end='', flush=True) pass @@ -30,6 +36,6 @@ def process_img(source_img, target_path): face = get_face(frame) source_face = get_face(cv2.imread(source_img)) result = face_swapper.get(frame, face, source_face, paste_back=True) - target_path = rreplace(target_path, "/", "/swapped-", 1) if "/" in target_path else "swapped-"+target_path + target_path = rreplace(target_path, "/", "/swapped-", 1) if "/" in target_path else "swapped-" + target_path print(target_path) cv2.imwrite(target_path, result) From 80fe3721ee93ce5e3df1fbbe4884c3d42af678fa Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 16:35:58 +0200 Subject: [PATCH 13/21] Remove debug output --- core/processor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/core/processor.py b/core/processor.py index cab3022..a2028a1 100644 --- a/core/processor.py +++ b/core/processor.py @@ -26,7 +26,6 @@ def process_video(source_img, frame_paths): else: print('S', end='', flush=True) except Exception as e: - print(e, flush=True) print('E', end='', flush=True) pass From 0adf2a7091f130578ceb81e31740fcf0b80d4ef2 Mon Sep 17 00:00:00 2001 From: Somdev Sangwan Date: Tue, 30 May 2023 22:58:39 +0530 Subject: [PATCH 14/21] fix cases where CUDA_VERSION is None --- run.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run.py b/run.py index 07fb06a..ed4f833 100644 --- a/run.py +++ b/run.py @@ -67,6 +67,8 @@ def pre_check(): CUDNN_VERSION = torch.backends.cudnn.version() if 'ROCMExecutionProvider' not in core.globals.providers: + if not torch.cuda.is_available() or not CUDA_VERSION: + quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.") if CUDA_VERSION > '11.8': quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.") if CUDA_VERSION < '11.4': @@ -75,8 +77,6 @@ def pre_check(): quit(f"CUDNN version 
{CUDNN_VERSION} is not supported - please upgrade to 8.9.1") if CUDNN_VERSION > 8910: quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1") - if not torch.cuda.is_available(): - quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.") else: core.globals.providers = ['CPUExecutionProvider'] From 864e2fe9dbad72be09fbc61cbfbc78f49396e153 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 20:42:54 +0200 Subject: [PATCH 15/21] Normalize cli args --- run.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run.py b/run.py index 7fe4e78..c3e50ba 100644 --- a/run.py +++ b/run.py @@ -30,11 +30,11 @@ parser = argparse.ArgumentParser() parser.add_argument('-f', '--face', help='use this face', dest='source_img') parser.add_argument('-t', '--target', help='replace this face', dest='target_path') parser.add_argument('-o', '--output', help='save output to this file', dest='output_file') -parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False) +parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) parser.add_argument('--max-memory', help='set max memory', default=16, type=int) -parser.add_argument('--cores', help='number of cores to use', dest='cores_count', type=int) +parser.add_argument('--max-cores', help='set max cpu cores', dest='cores_count', type=int) for name, value in vars(parser.parse_args()).items(): args[name] = value From 47dafb48ba3dc3e6964532a6355a25cb9d90dc2e Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 20:43:03 +0200 Subject: [PATCH 16/21] Normalize cli args --- README.md | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index d117967..37076dd 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ Choose a face (image with desired face) and the target image/video (image/video Don't touch the FPS checkbox unless you know what you are doing. Additional command line arguments are given below: + ``` options: -h, --help show this help message and exit @@ -31,12 +32,15 @@ options: use this face -t TARGET_PATH, --target TARGET_PATH replace this face --o OUTPUT_FILE, --output OUTPUT_FILE - save output to this file ---keep-fps keep original fps ---gpu use gpu ---keep-frames don't delete frames directory ---cores number of cores to use + -o OUTPUT_FILE, --output OUTPUT_FILE + save output to this file + --gpu use gpu + --keep-fps maintain original fps + --keep-frames keep frames directory + --max-memory MAX_MEMORY + set max memory + --max-cores CORES_COUNT + set max cpu cores ``` Looking for a CLI mode? Using the -f/--face argument will make the program in cli mode. 
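The flags renamed in patches 15 and 16 keep their original dest names, so --max-cores is still read back from the args dict as args['cores_count']. A standalone sketch of that mapping (the sample argv is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--max-memory', help='set max memory', default=16, type=int)
parser.add_argument('--max-cores', help='set max cpu cores', dest='cores_count', type=int)

args = {}
for name, value in vars(parser.parse_args(['--max-cores', '4'])).items():
    args[name] = value

print(args)  # {'max_memory': 16, 'cores_count': 4}

A typical invocation with the options listed above would then be python run.py -f face.jpg -t input.mp4 -o output.mp4 --gpu --max-memory 8 --max-cores 4, where the file names are placeholders.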
From ffc2bd24854e960306d6f3a1cf9d5ac696f98cb5 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 21:12:19 +0200 Subject: [PATCH 17/21] Resolve absolute model path --- core/processor.py | 5 ++++- run.py | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/core/processor.py b/core/processor.py index a2028a1..ad1e183 100644 --- a/core/processor.py +++ b/core/processor.py @@ -1,3 +1,5 @@ +import os + import cv2 import insightface from core.config import get_face @@ -9,7 +11,8 @@ FACE_SWAPPER = None def get_face_swapper(): global FACE_SWAPPER if FACE_SWAPPER is None: - FACE_SWAPPER = insightface.model_zoo.get_model('inswapper_128.onnx') + model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx') + FACE_SWAPPER = insightface.model_zoo.get_model(model_path) return FACE_SWAPPER diff --git a/run.py b/run.py index c3e50ba..0b61ab4 100644 --- a/run.py +++ b/run.py @@ -64,7 +64,8 @@ def pre_check(): quit(f'Python version is not supported - please upgrade to 3.8 or higher') if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') - if os.path.isfile('../inswapper_128.onnx'): + model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx') + if not os.path.isfile(model_path): quit('File "inswapper_128.onnx" does not exist!') if '--gpu' in sys.argv: CUDA_VERSION = torch.version.cuda From 98a5111d533ae4d1aad524f6efcacbb05e3250cc Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 21:47:59 +0200 Subject: [PATCH 18/21] Undo: Resolve absolute model path --- core/processor.py | 3 +-- run.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/core/processor.py b/core/processor.py index ad1e183..f1c33e5 100644 --- a/core/processor.py +++ b/core/processor.py @@ -11,8 +11,7 @@ FACE_SWAPPER = None def get_face_swapper(): global FACE_SWAPPER if FACE_SWAPPER is None: - model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx') - FACE_SWAPPER = insightface.model_zoo.get_model(model_path) + FACE_SWAPPER = insightface.model_zoo.get_model('inswapper_128.onnx') return FACE_SWAPPER diff --git a/run.py b/run.py index 6aaa4a8..4878164 100644 --- a/run.py +++ b/run.py @@ -61,8 +61,7 @@ def pre_check(): quit(f'Python version is not supported - please upgrade to 3.8 or higher') if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') - model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx') - if not os.path.isfile(model_path): + if not os.path.isfile('inswapper_128.onnx'): quit('File "inswapper_128.onnx" does not exist!') if '--gpu' in sys.argv: CUDA_VERSION = torch.version.cuda From 5feb6b0d7186592c8a1f20f16ca36f090fff6a71 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 21:52:39 +0200 Subject: [PATCH 19/21] Resolve absolute model path --- core/processor.py | 3 ++- run.py | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/core/processor.py b/core/processor.py index f1c33e5..0754e04 100644 --- a/core/processor.py +++ b/core/processor.py @@ -11,7 +11,8 @@ FACE_SWAPPER = None def get_face_swapper(): global FACE_SWAPPER if FACE_SWAPPER is None: - FACE_SWAPPER = insightface.model_zoo.get_model('inswapper_128.onnx') + model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx') + FACE_SWAPPER = insightface.model_zoo.get_model(model_path) return FACE_SWAPPER diff --git a/run.py b/run.py index 4878164..0b61ab4 100644 --- a/run.py +++ b/run.py @@ -34,11 +34,14 
@@ parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', de parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) parser.add_argument('--max-memory', help='set max memory', default=16, type=int) -parser.add_argument('--max-cores', help='set max cpu cores', dest='cores_count', type=int, default=psutil.cpu_count()-1) +parser.add_argument('--max-cores', help='set max cpu cores', dest='cores_count', type=int) for name, value in vars(parser.parse_args()).items(): args[name] = value +if not args['cores_count']: + args['cores_count'] = psutil.cpu_count()-1 + sep = "/" if os.name == "nt": sep = "\\" @@ -61,7 +64,8 @@ def pre_check(): quit(f'Python version is not supported - please upgrade to 3.8 or higher') if not shutil.which('ffmpeg'): quit('ffmpeg is not installed!') - if not os.path.isfile('inswapper_128.onnx'): + model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx') + if not os.path.isfile(model_path): quit('File "inswapper_128.onnx" does not exist!') if '--gpu' in sys.argv: CUDA_VERSION = torch.version.cuda From 3e35bea1df38c179adb2d7cd6f737734aedee0f6 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 22:16:38 +0200 Subject: [PATCH 20/21] Adjust max-cores with lower default --- run.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/run.py b/run.py index 0b61ab4..65749b4 100644 --- a/run.py +++ b/run.py @@ -34,14 +34,11 @@ parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', de parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) parser.add_argument('--max-memory', help='set max memory', default=16, type=int) -parser.add_argument('--max-cores', help='set max cpu cores', dest='cores_count', type=int) +parser.add_argument('--max-cores', help='number of cores to use', dest='cores_count', type=int, default=psutil.cpu_count() - 2) for name, value in vars(parser.parse_args()).items(): args[name] = value -if not args['cores_count']: - args['cores_count'] = psutil.cpu_count()-1 - sep = "/" if os.name == "nt": sep = "\\" From 411818d0f94f2a8b1fe791052f60efab187527b8 Mon Sep 17 00:00:00 2001 From: henryruhs Date: Tue, 30 May 2023 22:44:48 +0200 Subject: [PATCH 21/21] Need to return at least 2 cores for default --- run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run.py b/run.py index 65749b4..19a10be 100644 --- a/run.py +++ b/run.py @@ -34,7 +34,7 @@ parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', de parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) parser.add_argument('--max-memory', help='set max memory', default=16, type=int) -parser.add_argument('--max-cores', help='number of cores to use', dest='cores_count', type=int, default=psutil.cpu_count() - 2) +parser.add_argument('--max-cores', help='number of cores to use', dest='cores_count', type=int, default=max(psutil.cpu_count() - 2, 2)) for name, value in vars(parser.parse_args()).items(): args[name] = value
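On the default introduced in patches 20 and 21: psutil.cpu_count() reports logical cores and is documented to return None when the count cannot be determined, in which case psutil.cpu_count() - 2 would raise a TypeError before the max() guard ever applies; the max(..., 2) part only keeps the result from dropping below two workers. A slightly more defensive variant of the same default, offered as a sketch rather than a replacement:

import psutil

def default_cores():
    # leave two logical cores for the OS and UI, but never go below two workers
    total = psutil.cpu_count() or 4   # cpu_count() may return None; assume a small machine
    return max(total - 2, 2)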