diff --git a/.github/examples/face.jpg b/.github/examples/face.jpg
new file mode 100644
index 0000000..bdaa5e4
Binary files /dev/null and b/.github/examples/face.jpg differ
diff --git a/.github/examples/snapshot.mp4 b/.github/examples/snapshot.mp4
new file mode 100644
index 0000000..d2557ac
Binary files /dev/null and b/.github/examples/snapshot.mp4 differ
diff --git a/.github/examples/target.mp4 b/.github/examples/target.mp4
new file mode 100644
index 0000000..a8f3f6f
Binary files /dev/null and b/.github/examples/target.mp4 differ
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1d6de8c..ce8d223 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,9 +8,26 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v2
-      - name: Set up Python 3.8
+      - name: Set up Python 3.9
         uses: actions/setup-python@v2
         with:
-          python-version: 3.8
+          python-version: 3.9
       - run: pip install flake8
       - run: flake8 run.py core
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Set up ffmpeg
+        uses: FedericoCarboni/setup-ffmpeg@v2
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.9
+      - run: pip install -r requirements.txt
+      - run: pip install gdown
+      - run: gdown 14JzEMo8ScLinvBkl7QEvYvFEi7EBXNAt
+      - run: ./run.py -f=.github/examples/face.jpg -t=.github/examples/target.mp4 -o=.github/examples/output.mp4
+      - run: ffmpeg -i .github/examples/snapshot.mp4 -i .github/examples/output.mp4 -filter_complex "psnr" -f null -
+
diff --git a/README.md b/README.md
index 56b5c25..20eb6d1 100644
--- a/README.md
+++ b/README.md
@@ -4,8 +4,14 @@ That's it, that's the software. You can watch some demos [here](https://drive.go
 
 ![demo-gif](demo.gif)
 
+## Disclaimer
+Better deepfake software than this already exist, this is just a hobby project I created to learn about AI. Users must get consent from the concerned people before using their face and must not hide the fact that it is a deepfake when posting content online. I am not responsible for malicious behaviour of end-users.
+
+To prevent misuse, it has a built-in check which prevents the program from working on inappropriate media.
+
 ## How do I install it?
-> Note: The instructions may or may not work for you. Use google or look through issues people have created here to solve your problems.
+
+**Issues according installation will be closed without ceremony from now on, we cannot handle the amount of requests.**
 
 There are two types of installations: basic and gpu-powered.
 
@@ -50,9 +56,6 @@ Looking for a CLI mode? Using the -f/--face argument will make the program in cl
 - [ ] Replace a selective face throughout the video
 - [ ] Support for replacing multiple faces
 
-## Disclaimer
-Better deepfake software than this already exist, this is just a hobby project I created to learn about AI. Users are expected to use this program for learning programming and using the software in good faith. Users must get consent from the concerned people before using their face and must not hide the fact that it is a deepfake when posting content online. I am not responsible for malicious behaviour of end-users.
-
 ## Credits
 - [ffmpeg](https://ffmpeg.org/): for making video related operations easy
 - [deepinsight](https://github.com/deepinsight): for their [insightface](https://github.com/deepinsight/insightface) project which provided a well-made library and models.
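For context on the workflow change above: the new test job renders .github/examples/output.mp4 and then runs ffmpeg's psnr filter against the committed snapshot.mp4, which prints a similarity report but does not by itself fail the step on a low score. Below is a minimal sketch of how that report could be turned into a pass/fail signal; the check_psnr helper, the 35 dB threshold, and the reliance on the "average:" field of ffmpeg's PSNR summary line are illustrative assumptions, not part of this patch.

import re
import subprocess
import sys


def check_psnr(reference: str, candidate: str, min_db: float = 35.0) -> None:
    # Run the same comparison as the CI step; the psnr filter writes its
    # summary ("PSNR y:... u:... v:... average:... min:... max:...") to stderr.
    result = subprocess.run(
        ['ffmpeg', '-i', reference, '-i', candidate, '-filter_complex', 'psnr', '-f', 'null', '-'],
        capture_output=True, text=True
    )
    match = re.search(r'average:(inf|[0-9.]+)', result.stderr)
    if not match:
        sys.exit('No PSNR summary found in ffmpeg output')
    # Identical streams report "inf"; otherwise enforce the (assumed) threshold.
    if match.group(1) != 'inf' and float(match.group(1)) < min_db:
        sys.exit(f'Average PSNR {match.group(1)} dB is below {min_db} dB')


if __name__ == '__main__':
    check_psnr('.github/examples/snapshot.mp4', '.github/examples/output.mp4')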
diff --git a/core/utils.py b/core/utils.py
index 9a1498e..50aca35 100644
--- a/core/utils.py
+++ b/core/utils.py
@@ -23,7 +23,7 @@ def detect_fps(input_path):
     output = os.popen(f'ffprobe -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate "{input_path}"').read()
     if "/" in output:
         try:
-            return int(output.split(os.sep)[0]) // int(output.split(os.sep)[1]), output.removesuffix('\n')
+            return int(output.split("/")[0]) // int(output.split("/")[1].strip()), output.strip()
         except:
             pass
     return 30, 30
diff --git a/requirements.txt b/requirements.txt
index 88b40db..eaccae0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,8 +6,10 @@ psutil==5.9.5
 tk==0.1.0
 pillow==9.5.0
 torch==2.0.1
-onnxruntime-gpu==1.15.0
-tensorflow==2.12.0
+onnxruntime==1.15.0; sys_platform == 'darwin'
+onnxruntime-gpu==1.15.0; sys_platform != 'darwin'
+tensorflow==2.13.0rc1; sys_platform == 'darwin'
+tensorflow==2.12.0; sys_platform != 'darwin'
 opennsfw2==0.10.2
-protobuf==3.20.2
+protobuf==4.23.2
 tqdm==4.65.0
diff --git a/run.py b/run.py
index 6fe9192..a7d4abe 100755
--- a/run.py
+++ b/run.py
@@ -3,7 +3,6 @@
 import platform
 import signal
 import sys
-import time
 import shutil
 import glob
 import argparse
@@ -64,15 +63,16 @@ def limit_resources():
 
 
 def pre_check():
-    if sys.version_info < (3, 8):
-        quit('Python version is not supported - please upgrade to 3.8 or higher')
+    if sys.version_info < (3, 9):
+        quit('Python version is not supported - please upgrade to 3.9 or higher')
     if not shutil.which('ffmpeg'):
         quit('ffmpeg is not installed!')
     model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx')
     if not os.path.isfile(model_path):
         quit('File "inswapper_128.onnx" does not exist!')
     if '--gpu' in sys.argv:
-        if 'ROCMExecutionProvider' not in core.globals.providers:
+        NVIDIA_PROVIDERS = ['CUDAExecutionProvider', 'TensorrtExecutionProvider']
+        if len(list(set(core.globals.providers) - set(NVIDIA_PROVIDERS))) == 1:
             CUDA_VERSION = torch.version.cuda
             CUDNN_VERSION = torch.backends.cudnn.version()
             if not torch.cuda.is_available() or not CUDA_VERSION:
@@ -92,12 +92,8 @@ def pre_check():
 
 
 def start_processing():
-    start_time = time.time()
     if args['gpu']:
         process_video(args['source_img'], args["frame_paths"])
-        end_time = time.time()
-        print(flush=True)
-        print(f"Processing time: {end_time - start_time:.2f} seconds", flush=True)
         return
     frame_paths = args["frame_paths"]
     n = len(frame_paths)//(args['cores_count'])
@@ -109,9 +105,6 @@
         p.get()
     pool.close()
    pool.join()
-    end_time = time.time()
-    print(flush=True)
-    print(f"Processing time: {end_time - start_time:.2f} seconds", flush=True)
 
 
 def preview_image(image_path):
@@ -156,7 +149,7 @@ def select_target():
 
 
 def toggle_fps_limit():
-    args['keep_fps'] = limit_fps.get() != True
+    args['keep_fps'] = int(limit_fps.get() != True)
 
 
 def toggle_all_faces():
@@ -164,7 +157,7 @@ def toggle_fps_limit():
 
 
 def toggle_keep_frames():
-    args['keep_frames'] = keep_frames.get() != True
+    args['keep_frames'] = int(keep_frames.get())
 
 
 def save_file():
@@ -183,7 +176,6 @@ def status(string):
 
 
 def start():
-    print("DON'T WORRY. IT'S NOT STUCK/CRASHED.\n" * 5)
     if not args['source_img'] or not os.path.isfile(args['source_img']):
         print("\n[WARNING] Please select an image containing a face.")
         return
@@ -206,8 +198,8 @@ def start():
         process_img(args['source_img'], target_path, args['output_file'])
         status("swap successful!")
         return
-    seconds, probabilities = predict_video_frames(video_path=args['target_path'], frame_interval=50)
-    if any(probability > 0.7 for probability in probabilities):
+    seconds, probabilities = predict_video_frames(video_path=args['target_path'], frame_interval=100)
+    if any(probability > 0.85 for probability in probabilities):
         quit()
     video_name_full = target_path.split("/")[-1]
     video_name = os.path.splitext(video_name_full)[0]
@@ -273,13 +265,12 @@ if __name__ == "__main__":
     all_faces_checkbox.place(x=30,y=500,width=240,height=31)
 
     # FPS limit checkbox
-    limit_fps = tk.IntVar()
+    limit_fps = tk.IntVar(None, not args['keep_fps'])
     fps_checkbox = tk.Checkbutton(window, relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Limit FPS to 30", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=limit_fps, command=toggle_fps_limit)
-    fps_checkbox.place(x=30,y=475,width=240,height=31)
-    fps_checkbox.select()
+    fps_checkbox.place(x=30,y=500,width=240,height=31)
 
     # Keep frames checkbox
-    keep_frames = tk.IntVar()
+    keep_frames = tk.IntVar(None, args['keep_frames'])
     frames_checkbox = tk.Checkbutton(window, relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Keep frames dir", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=keep_frames, command=toggle_keep_frames)
     frames_checkbox.place(x=30,y=450,width=240,height=31)
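For context on the core/utils.py hunk above: ffprobe reports r_frame_rate as a fraction such as "30000/1001", so splitting on os.sep only worked where the path separator happens to be "/", and on Windows the lookup silently fell through to the 30 fps default. The following is a standalone sketch of the corrected parsing that mirrors the patched detect_fps; the narrowed exception tuple and the comments are additions for illustration, not part of the patch.

import os


def detect_fps(input_path):
    # Ask ffprobe for the stream's r_frame_rate, which comes back as a
    # fraction string such as "30000/1001" followed by a newline.
    output = os.popen(
        f'ffprobe -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 '
        f'-show_entries stream=r_frame_rate "{input_path}"'
    ).read()
    if "/" in output:
        try:
            # Split on the literal "/" (not os.sep) and strip the trailing
            # newline before converting, exactly as the patched code does.
            return int(output.split("/")[0]) // int(output.split("/")[1].strip()), output.strip()
        except (ValueError, ZeroDivisionError, IndexError):
            pass
    # Fall back to the same defaults the project uses when parsing fails.
    return 30, 30

For an NTSC clip this returns (29, '30000/1001'), since 30000 // 1001 is 29, on every platform rather than only where os.sep equals "/".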