diff --git a/roop/core.py b/roop/core.py index 4a7b9b8..371191d 100644 --- a/roop/core.py +++ b/roop/core.py @@ -36,7 +36,7 @@ parser = argparse.ArgumentParser() parser.add_argument('-f', '--face', help='use this face', dest='source_img') parser.add_argument('-t', '--target', help='replace this face', dest='target_path') parser.add_argument('-o', '--output', help='save output to this file', dest='output_file') -parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False) +parser.add_argument('--gpu', help='choose your gpu vendor', dest='gpu', choices=['amd', 'nvidia']) parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False) parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False) parser.add_argument('--max-memory', help='maximum amount of RAM in GB to be used', type=int) @@ -46,7 +46,10 @@ parser.add_argument('--all-faces', help='swap all faces in frame', dest='all_fac for name, value in vars(parser.parse_args()).items(): args[name] = value -if '--all-faces' in sys.argv or '-a' in sys.argv: +if args['gpu']: + roop.globals.gpu = args['gpu'] + +if args['all_faces']: roop.globals.all_faces = True sep = "/" @@ -74,33 +77,31 @@ def pre_check(): model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx') if not os.path.isfile(model_path): quit('File "inswapper_128.onnx" does not exist!') - if '--gpu' in sys.argv: - roop.globals.use_gpu = True - NVIDIA_PROVIDERS = ['CUDAExecutionProvider', 'TensorrtExecutionProvider'] - if len(list(set(roop.globals.providers) - set(NVIDIA_PROVIDERS))) == 1: - CUDA_VERSION = torch.version.cuda - CUDNN_VERSION = torch.backends.cudnn.version() - if not torch.cuda.is_available() or not CUDA_VERSION: - quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.") - if CUDA_VERSION > '11.8': - quit(f"CUDA version 
{CUDA_VERSION} is not supported - please downgrade to 11.8") - if CUDA_VERSION < '11.4': - quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8") - if CUDNN_VERSION < 8220: - quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1") - if CUDNN_VERSION > 8910: - quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1") + if roop.globals.gpu == 'amd': + if 'ROCMExecutionProvider' not in roop.globals.providers: + quit("You are using --gpu=amd flag but ROCM isn't available or properly installed on your system.") + if roop.globals.gpu == 'nvidia': + CUDA_VERSION = torch.version.cuda + CUDNN_VERSION = torch.backends.cudnn.version() + if not torch.cuda.is_available() or not CUDA_VERSION: + quit("You are using --gpu=nvidia flag but CUDA isn't available or properly installed on your system.") + if CUDA_VERSION > '11.8': + quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8") + if CUDA_VERSION < '11.4': + quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8") + if CUDNN_VERSION < 8220: + quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1") + if CUDNN_VERSION > 8910: + quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1") else: roop.globals.providers = ['CPUExecutionProvider'] - if '--all-faces' in sys.argv or '-a' in sys.argv: - roop.globals.all_faces = True def start_processing(): frame_paths = args["frame_paths"] n = len(frame_paths) // (args['cores_count']) # single thread - if roop.globals.use_gpu or n < 2: + if roop.globals.gpu == 'amd' or roop.globals.gpu == 'nvidia' or n < 2: process_video(args['source_img'], args["frame_paths"]) return # multithread if total frames to cpu cores ratio is greater than 2 diff --git a/roop/globals.py b/roop/globals.py index d0f621a..34adafd 100644 --- a/roop/globals.py +++ b/roop/globals.py @@ -1,8 +1,8 @@ import onnxruntime -use_gpu = False +gpu = None 
all_faces = False -log_level = 'quiet' +log_level = 'error' providers = onnxruntime.get_available_providers() if 'TensorrtExecutionProvider' in providers: diff --git a/roop/utils.py b/roop/utils.py index 670385a..34976ba 100644 --- a/roop/utils.py +++ b/roop/utils.py @@ -29,12 +29,14 @@ def detect_fps(input_path): pass return 30, 30 + def run_ffmpeg(args): - hwaccel_option = '-hwaccel cuda' if roop.globals.use_gpu else '' + hwaccel_option = '-hwaccel cuda' if roop.globals.gpu == 'nvidia' else '' log_level = f'-loglevel {roop.globals.log_level}' os.system(f'ffmpeg {hwaccel_option} {log_level} {args}') + def set_fps(input_path, output_path, fps): input_path, output_path = path(input_path), path(output_path) run_ffmpeg(f'-i "{input_path}" -filter:v fps=fps={fps} "{output_path}"')