Merge pull request #91 from henryruhs/main

Introduce pre_check() and limit_resources()
Somdev Sangwan 2023-05-31 02:27:34 +05:30 committed by GitHub
commit 251fde9596
4 changed files with 87 additions and 33 deletions


@@ -24,18 +24,23 @@ Choose a face (image with desired face) and the target image/video (image/video
 Don't touch the FPS checkbox unless you know what you are doing.
 Additional command line arguments are given below:
 ```
+options:
 -h, --help            show this help message and exit
 -f SOURCE_IMG, --face SOURCE_IMG
                       use this face
 -t TARGET_PATH, --target TARGET_PATH
                       replace this face
 -o OUTPUT_FILE, --output OUTPUT_FILE
                       save output to this file
---keep-fps            keep original fps
 --gpu                 use gpu
---keep-frames         don't delete frames directory
---cores               number of cores to use
+--keep-fps            maintain original fps
+--keep-frames         keep frames directory
+--max-memory MAX_MEMORY
+                      set max memory
+--max-cores CORES_COUNT
+                      set max cpu cores
 ```
 Looking for a CLI mode? Using the -f/--face argument will make the program in cli mode.
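
For reference, a hypothetical invocation that exercises the renamed and newly added flags could look like the following (the file names are placeholders, not part of the repository):

```
python run.py -f face.jpg -t target.mp4 -o swapped.mp4 --gpu --keep-fps --max-memory 8 --max-cores 4
```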


@@ -1,13 +1,20 @@
 import insightface
 import core.globals
 
-face_analyser = insightface.app.FaceAnalysis(name='buffalo_l', providers=core.globals.providers)
-face_analyser.prepare(ctx_id=0, det_size=(640, 640))
+FACE_ANALYSER = None
+
+
+def get_face_analyser():
+    global FACE_ANALYSER
+    if FACE_ANALYSER is None:
+        FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=core.globals.providers)
+        FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
+    return FACE_ANALYSER
 
 
 def get_face(img_data):
-    analysed = face_analyser.get(img_data)
+    face = get_face_analyser().get(img_data)
     try:
-        return sorted(analysed, key=lambda x: x.bbox[0])[0]
+        return sorted(face, key=lambda x: x.bbox[0])[0]
     except IndexError:
         return None
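
The hunk above swaps import-time construction of the analyser for a cached getter, so the insightface model is only loaded when a face is actually requested. A minimal, self-contained sketch of that lazy-singleton pattern, using a cheap stand-in object instead of FaceAnalysis:

```
# Sketch of the lazy-initialization pattern introduced above; the stand-in
# object replaces the expensive insightface.app.FaceAnalysis construction.
EXPENSIVE_OBJECT = None


def get_expensive_object():
    global EXPENSIVE_OBJECT
    if EXPENSIVE_OBJECT is None:
        print('constructing...')        # runs only on the first call
        EXPENSIVE_OBJECT = object()     # stand-in for FaceAnalysis(...)
    return EXPENSIVE_OBJECT


if __name__ == '__main__':
    first = get_expensive_object()      # triggers construction
    second = get_expensive_object()     # returns the cached instance
    assert first is second
```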


@@ -1,14 +1,19 @@
 import os
 import cv2
 import insightface
-import core.globals
 from core.config import get_face
 from core.utils import rreplace
 
-if os.path.isfile('inswapper_128.onnx'):
-    face_swapper = insightface.model_zoo.get_model('inswapper_128.onnx', providers=core.globals.providers)
-else:
-    quit('File "inswapper_128.onnx" does not exist!')
+FACE_SWAPPER = None
+
+
+def get_face_swapper():
+    global FACE_SWAPPER
+    if FACE_SWAPPER is None:
+        model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx')
+        FACE_SWAPPER = insightface.model_zoo.get_model(model_path)
+    return FACE_SWAPPER
 
 
 def process_video(source_img, frame_paths):
@@ -18,7 +23,7 @@ def process_video(source_img, frame_paths):
         try:
             face = get_face(frame)
             if face:
-                result = face_swapper.get(frame, face, source_face, paste_back=True)
+                result = get_face_swapper().get(frame, face, source_face, paste_back=True)
                 cv2.imwrite(frame_path, result)
                 print('.', end='', flush=True)
             else:
@@ -34,4 +39,4 @@ def process_img(source_img, target_path, output_file):
     source_face = get_face(cv2.imread(source_img))
     result = face_swapper.get(frame, face, source_face, paste_back=True)
     cv2.imwrite(output_file, result)
     print("\n\nImage saved as:", output_file, "\n\n")

run.py

@@ -1,19 +1,11 @@
 #!/usr/bin/env python3
+import platform
 import sys
 import time
+import torch
 import shutil
 import core.globals
 
-if not shutil.which('ffmpeg'):
-    print('ffmpeg is not installed. Read the docs: https://github.com/s0md3v/roop#installation.\n' * 10)
-    quit()
-
-if '--gpu' not in sys.argv:
-    core.globals.providers = ['CPUExecutionProvider']
-elif 'ROCMExecutionProvider' not in core.globals.providers:
-    import torch
-    if not torch.cuda.is_available():
-        quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.")
 import glob
 import argparse
 import multiprocessing as mp
@@ -38,10 +30,11 @@ parser = argparse.ArgumentParser()
 parser.add_argument('-f', '--face', help='use this face', dest='source_img')
 parser.add_argument('-t', '--target', help='replace this face', dest='target_path')
 parser.add_argument('-o', '--output', help='save output to this file', dest='output_file')
-parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False)
 parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False)
+parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False)
 parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False)
-parser.add_argument('--cores', help='number of cores to use', dest='cores_count', type=int, default=psutil.cpu_count()-1)
+parser.add_argument('--max-memory', help='set max memory', default=16, type=int)
+parser.add_argument('--max-cores', help='number of cores to use', dest='cores_count', type=int, default=max(psutil.cpu_count() - 2, 2))
 
 for name, value in vars(parser.parse_args()).items():
     args[name] = value
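
The new --max-cores default of max(psutil.cpu_count() - 2, 2) leaves a couple of cores free for the OS and the GUI. As a rough, hypothetical sketch (not the project's actual start_processing()), such a value would typically feed a multiprocessing pool like this:

```
# Hypothetical sketch: using a cores_count default like the one above to size
# a process pool. Assumes psutil is installed, as run.py already requires.
import multiprocessing as mp

import psutil


def work(n):
    return n * n


if __name__ == '__main__':
    cores_count = max(psutil.cpu_count() - 2, 2)
    with mp.Pool(processes=cores_count) as pool:
        print(pool.map(work, range(10)))
```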
@@ -51,6 +44,45 @@ if os.name == "nt":
     sep = "\\"
 
 
+def limit_resources():
+    if args['max_memory'] >= 1:
+        memory = args['max_memory'] * 1024 * 1024 * 1024
+        if str(platform.system()).lower() == 'windows':
+            import ctypes
+            kernel32 = ctypes.windll.kernel32
+            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
+        else:
+            import resource
+            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+
+
+def pre_check():
+    if sys.version_info < (3, 8):
+        quit(f'Python version is not supported - please upgrade to 3.8 or higher')
+    if not shutil.which('ffmpeg'):
+        quit('ffmpeg is not installed!')
+    model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx')
+    if not os.path.isfile(model_path):
+        quit('File "inswapper_128.onnx" does not exist!')
+    if '--gpu' in sys.argv:
+        CUDA_VERSION = torch.version.cuda
+        CUDNN_VERSION = torch.backends.cudnn.version()
+        if 'ROCMExecutionProvider' not in core.globals.providers:
+            if not torch.cuda.is_available() or not CUDA_VERSION:
+                quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.")
+            if CUDA_VERSION > '11.8':
+                quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.")
+            if CUDA_VERSION < '11.4':
+                quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8")
+            if CUDNN_VERSION < 8220:
+                quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1")
+            if CUDNN_VERSION > 8910:
+                quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1")
+    else:
+        core.globals.providers = ['CPUExecutionProvider']
+
+
 def start_processing():
     start_time = time.time()
     if args['gpu']:
@@ -73,6 +105,7 @@ def start_processing():
     print(flush=True)
     print(f"Processing time: {end_time - start_time:.2f} seconds", flush=True)
 
+
 def preview_image(image_path):
     img = Image.open(image_path)
     img = img.resize((180, 180), Image.ANTIALIAS)
@@ -189,6 +222,10 @@ def start():
 
 if __name__ == "__main__":
     global status_label, window
+    pre_check()
+    limit_resources()
     if args['source_img']:
         args['cli_mode'] = True
         start()
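
Taken together, the new entry point runs pre_check() for environment validation and limit_resources() for the memory cap before any processing starts. A standalone sketch of the memory-capping technique used by limit_resources(), kept separate from the repository code (both calls are best-effort and platform-dependent):

```
# Standalone sketch of the memory-capping approach from limit_resources():
# SetProcessWorkingSetSize on Windows, resource.setrlimit elsewhere.
import platform


def limit_memory(gigabytes):
    memory = gigabytes * 1024 * 1024 * 1024
    if platform.system().lower() == 'windows':
        import ctypes
        kernel32 = ctypes.windll.kernel32
        kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
    else:
        import resource
        resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))


if __name__ == '__main__':
    limit_memory(4)  # best-effort cap at roughly 4 GB
    print('memory limit requested')
```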