Compare commits

No commits in common. "main" and "cog" have entirely different histories.
main...cog

10 changed files with 92 additions and 119 deletions

@@ -1,2 +0,0 @@
[flake8]
select = E3, E4, F

@@ -1,16 +0,0 @@
name: ci
on: [ push, pull_request ]
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Python 3.9
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - run: pip install flake8
      - run: flake8 run.py core

@@ -4,14 +4,8 @@ That's it, that's the software. You can watch some demos [here](https://drive.go
![demo-gif](demo.gif)
## Disclaimer
Better deepfake software than this already exists; this is just a hobby project I created to learn about AI. Users must get consent from the people concerned before using their face and must not hide the fact that it is a deepfake when posting content online. I am not responsible for the malicious behaviour of end-users.
To prevent misuse, it has a built-in check which prevents the program from working on inappropriate media.
## How do I install it?
**Issues regarding installation will be closed without ceremony from now on; we cannot handle the volume of requests.**
> Note: The instructions may or may not work for you. Use Google or look through the issues people have created here to solve your problems.
There are two types of installation: basic and GPU-powered.
@@ -44,9 +38,9 @@ options:
  --keep-fps            maintain original fps
  --keep-frames         keep frames directory
  --max-memory MAX_MEMORY
                        maximum amount of RAM in GB to be used
                        set max memory
  --max-cores CORES_COUNT
                        number of cores to be use for CPU mode
                        set max cpu cores
```
Looking for a CLI mode? Using the -f/--face argument will run the program in CLI mode.
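The flags above map directly onto run.py's argparse options shown later in this diff; a hypothetical invocation (file names are placeholders, assuming dependencies are installed) could look like:
```
python run.py -f face.jpg -t target.mp4 -o swapped.mp4 --keep-fps --max-cores 4
```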
@@ -56,6 +50,9 @@ Looking for a CLI mode? Using the -f/--face argument will make the program in cl
- [ ] Replace a selective face throughout the video
- [ ] Support for replacing multiple faces
## Disclaimer
Better deepfake software than this already exists; this is just a hobby project I created to learn about AI. Users are expected to use this program to learn programming and to use the software in good faith. Users must get consent from the people concerned before using their face and must not hide the fact that it is a deepfake when posting content online. I am not responsible for the malicious behaviour of end-users.
## Credits
- [ffmpeg](https://ffmpeg.org/): for making video-related operations easy
- [deepinsight](https://github.com/deepinsight): for their [insightface](https://github.com/deepinsight/insightface) project which provided a well-made library and models.

@@ -1,3 +1,4 @@
import torch
import onnxruntime
use_gpu = False

core/processor.py Normal file
@@ -0,0 +1,42 @@
import os
import cv2
import insightface
from core.config import get_face
from core.utils import rreplace
FACE_SWAPPER = None
def get_face_swapper():
    global FACE_SWAPPER
    if FACE_SWAPPER is None:
        model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx')
        FACE_SWAPPER = insightface.model_zoo.get_model(model_path)
    return FACE_SWAPPER
def process_video(source_img, frame_paths):
    source_face = get_face(cv2.imread(source_img))
    for frame_path in frame_paths:
        frame = cv2.imread(frame_path)
        try:
            face = get_face(frame)
            if face:
                result = get_face_swapper().get(frame, face, source_face, paste_back=True)
                cv2.imwrite(frame_path, result)
                print('.', end='', flush=True)
            else:
                print('S', end='', flush=True)
        except Exception as e:
            print('E', end='', flush=True)
            pass
def process_img(source_img, target_path, output_file):
    frame = cv2.imread(target_path)
    face = get_face(frame)
    source_face = get_face(cv2.imread(source_img))
    result = get_face_swapper().get(frame, face, source_face, paste_back=True)
    cv2.imwrite(output_file, result)
    print("\n\nImage saved as:", output_file, "\n\n")

@@ -1,44 +0,0 @@
import os
from tqdm import tqdm
import cv2
import insightface
import core.globals
from core.analyser import get_face
FACE_SWAPPER = None
def get_face_swapper():
    global FACE_SWAPPER
    if FACE_SWAPPER is None:
        model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx')
        FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=core.globals.providers)
    return FACE_SWAPPER
def process_video(source_img, frame_paths):
    source_face = get_face(cv2.imread(source_img))
    with tqdm(total=len(frame_paths), desc="Processing", unit="frame", dynamic_ncols=True, bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]') as progress:
        for frame_path in frame_paths:
            frame = cv2.imread(frame_path)
            try:
                face = get_face(frame)
                if face:
                    result = get_face_swapper().get(frame, face, source_face, paste_back=True)
                    cv2.imwrite(frame_path, result)
                    progress.set_postfix(status='.', refresh=True)
                else:
                    progress.set_postfix(status='S', refresh=True)
            except Exception:
                progress.set_postfix(status='E', refresh=True)
                pass
            progress.update(1)
def process_img(source_img, target_path, output_file):
    frame = cv2.imread(target_path)
    face = get_face(frame)
    source_face = get_face(cv2.imread(source_img))
    result = get_face_swapper().get(frame, face, source_face, paste_back=True)
    cv2.imwrite(output_file, result)
    print("\n\nImage saved as:", output_file, "\n\n")

@@ -23,10 +23,10 @@ def detect_fps(input_path):
    output = os.popen(f'ffprobe -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate "{input_path}"').read()
    if "/" in output:
        try:
            return int(output.split(os.sep)[0]) // int(output.split(os.sep)[1]), output.removesuffix('\n')
            return int(output.split("/")[0]) // int(output.split("/")[1])
        except:
            pass
    return 30, 30
    return 60
def set_fps(input_path, output_path, fps):
@@ -36,7 +36,7 @@ def set_fps(input_path, output_path, fps):
def create_video(video_name, fps, output_dir):
    output_dir = path(output_dir)
    os.system(f'ffmpeg -framerate "{fps}" -i "{output_dir}{sep}%04d.png" -c:v libx264 -crf 7 -pix_fmt yuv420p -y "{output_dir}{sep}output.mp4"')
    os.system(f'ffmpeg -framerate {fps} -i "{output_dir}{sep}%04d.png" -c:v libx264 -crf 7 -pix_fmt yuv420p -y "{output_dir}{sep}output.mp4"')
def extract_frames(input_path, output_dir):
@@ -44,13 +44,14 @@ def extract_frames(input_path, output_dir):
    os.system(f'ffmpeg -i "{input_path}" "{output_dir}{sep}%04d.png"')
def add_audio(output_dir, target_path, video, keep_frames, output_file):
    video_name = os.path.splitext(video)[0]
    save_to = output_file if output_file else output_dir + "/swapped-" + video_name + ".mp4"
def add_audio(output_dir, target_path, keep_frames, output_file):
    video = target_path.split("/")[-1]
    video_name = video.split(".")[0]
    save_to = output_file if output_file else output_dir + f"/swapped-" + video_name + ".mp4"
    save_to_ff, output_dir_ff = path(save_to), path(output_dir)
    os.system(f'ffmpeg -i "{output_dir_ff}{sep}output.mp4" -i "{output_dir_ff}{sep}{video}" -c:v copy -map 0:v:0 -map 1:a:0 -y "{save_to_ff}"')
    if not os.path.isfile(save_to):
        shutil.move(output_dir + "/output.mp4", save_to)
        shutil.move(output_dir + f"/output.mp4", save_to)
    if not keep_frames:
        shutil.rmtree(output_dir)
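The `detect_fps` change above swaps an `(fps, exact_fps)` tuple for a single integer; a minimal sketch of the parsing it relies on, assuming ffprobe prints an `r_frame_rate` fraction such as `30000/1001`:
```python
# Sketch: turn ffprobe's r_frame_rate output into an integer fps (fallback 60).
output = "30000/1001\n"                 # example ffprobe output, hypothetical
fps = 60
if "/" in output:
    try:
        num, den = output.strip().split("/")
        fps = int(num) // int(den)      # integer division, as in detect_fps -> 29
    except (ValueError, ZeroDivisionError):
        pass
print(fps)
```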

@ -1,4 +1,4 @@
numpy==1.23.5
numpy==1.24.3
opencv-python==4.7.0.72
onnx==1.14.0
insightface==0.7.3
@@ -6,8 +6,4 @@ psutil==5.9.5
tk==0.1.0
pillow==9.5.0
torch==2.0.1
onnxruntime-gpu==1.15.0
tensorflow==2.12.0
opennsfw2==0.10.2
protobuf==4.23.2
tqdm==4.65.0
onnxruntime==1.15.0

run.py Executable file → Normal file
@@ -1,36 +1,31 @@
#!/usr/bin/env python3
import platform
import signal
import sys
import time
import torch
import shutil
import core.globals
import glob
import argparse
import multiprocessing as mp
import os
import torch
from pathlib import Path
import tkinter as tk
from tkinter import filedialog
from opennsfw2 import predict_video_frames, predict_image
from tkinter.filedialog import asksaveasfilename
from core.processor import process_video, process_img
from core.utils import is_img, detect_fps, set_fps, create_video, add_audio, extract_frames, rreplace
from core.config import get_face
import webbrowser
import psutil
import cv2
import threading
from PIL import Image, ImageTk
import core.globals
from core.swapper import process_video, process_img
from core.utils import is_img, detect_fps, set_fps, create_video, add_audio, extract_frames, rreplace
from core.analyser import get_face
if 'ROCMExecutionProvider' in core.globals.providers:
    del torch
pool = None
args = {}
signal.signal(signal.SIGINT, lambda signal_number, frame: quit())
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--face', help='use this face', dest='source_img')
parser.add_argument('-t', '--target', help='replace this face', dest='target_path')
@@ -38,8 +33,8 @@ parser.add_argument('-o', '--output', help='save output to this file', dest='out
parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False)
parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False)
parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False)
parser.add_argument('--max-memory', help='maximum amount of RAM in GB to be used', type=int)
parser.add_argument('--max-cores', help='number of cores to be use for CPU mode', dest='cores_count', type=int, default=max(psutil.cpu_count() - 2, 2))
parser.add_argument('--max-memory', help='set max memory', default=16, type=int)
parser.add_argument('--max-cores', help='number of cores to use', dest='cores_count', type=int, default=max(psutil.cpu_count() - 2, 2))
for name, value in vars(parser.parse_args()).items():
    args[name] = value
@@ -50,7 +45,7 @@ if os.name == "nt":
def limit_resources():
    if args['max_memory']:
    if args['max_memory'] >= 1:
        memory = args['max_memory'] * 1024 * 1024 * 1024
        if str(platform.system()).lower() == 'windows':
            import ctypes
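The hunk above is truncated after `import ctypes`; a hedged sketch of how such a per-platform memory cap is typically applied (the Windows call mirrors the visible ctypes import, the POSIX branch is an assumption):
```python
import platform

def limit_memory(max_memory_gb):
    # Sketch only: cap process memory, roughly following the truncated hunk above.
    memory = max_memory_gb * 1024 * 1024 * 1024
    if platform.system().lower() == 'windows':
        import ctypes
        kernel32 = ctypes.windll.kernel32
        kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
    else:
        import resource                  # POSIX-only module
        resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
```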
@@ -62,21 +57,22 @@ def limit_resources():
def pre_check():
    if sys.version_info < (3, 9):
        quit('Python version is not supported - please upgrade to 3.9 or higher')
    if sys.version_info < (3, 8):
        quit(f'Python version is not supported - please upgrade to 3.8 or higher')
    if not shutil.which('ffmpeg'):
        quit('ffmpeg is not installed!')
    model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx')
    if not os.path.isfile(model_path):
        quit('File "inswapper_128.onnx" does not exist!')
    if '--gpu' in sys.argv:
        if 'ROCMExecutionProvider' not in core.globals.providers:
            CUDA_VERSION = torch.version.cuda
            CUDNN_VERSION = torch.backends.cudnn.version()
        NVIDIA_PROVIDERS = ['CUDAExecutionProvider', 'TensorrtExecutionProvider']
        if len(list(set(core.globals.providers) - set(NVIDIA_PROVIDERS))) > 1:
            if not torch.cuda.is_available() or not CUDA_VERSION:
                quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.")
            if CUDA_VERSION > '11.8':
                quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8")
                quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8.")
            if CUDA_VERSION < '11.4':
                quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8")
            if CUDNN_VERSION < 8220:
@@ -88,8 +84,12 @@ def pre_check():
def start_processing():
    start_time = time.time()
    if args['gpu']:
        process_video(args['source_img'], args["frame_paths"])
        end_time = time.time()
        print(flush=True)
        print(f"Processing time: {end_time - start_time:.2f} seconds", flush=True)
        return
    frame_paths = args["frame_paths"]
    n = len(frame_paths)//(args['cores_count'])
@@ -101,6 +101,9 @@ def start_processing():
        p.get()
    pool.close()
    pool.join()
    end_time = time.time()
    print(flush=True)
    print(f"Processing time: {end_time - start_time:.2f} seconds", flush=True)
def preview_image(image_path):
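Only the tail of the CPU path is visible here (`p.get()`, `pool.close()`, `pool.join()`), but `n = len(frame_paths)//(args['cores_count'])` suggests the frame list is split into one slice per worker; a hedged sketch of that chunking idea, using placeholder names:
```python
import multiprocessing as mp

def chunk(items, size):
    # Yield successive slices of `items`, each at most `size` long.
    for i in range(0, len(items), size):
        yield items[i:i + size]

def fake_worker(paths):
    # Stand-in for process_video(source_img, frame_paths); returns the chunk size.
    return len(paths)

if __name__ == "__main__":
    frame_paths = [f"{i:04d}.png" for i in range(1, 101)]
    cores_count = 4
    n = max(len(frame_paths) // cores_count, 1)
    with mp.Pool(cores_count) as pool:
        jobs = [pool.apply_async(fake_worker, (part,)) for part in chunk(frame_paths, n)]
        print([job.get() for job in jobs])   # block until every chunk is done
```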
@@ -168,6 +171,7 @@ def status(string):
def start():
    print("DON'T WORRY. IT'S NOT STUCK/CRASHED.\n" * 5)
    if not args['source_img'] or not os.path.isfile(args['source_img']):
        print("\n[WARNING] Please select an image containing a face.")
        return
@@ -175,8 +179,7 @@ def start():
        print("\n[WARNING] Please select a video/image to swap face in.")
        return
    if not args['output_file']:
        target_path = args['target_path']
        args['output_file'] = rreplace(target_path, "/", "/swapped-", 1) if "/" in target_path else "swapped-" + target_path
        args['output_file'] = rreplace(args['target_path'], "/", "/swapped-", 1) if "/" in target_path else "swapped-"+target_path
    global pool
    pool = mp.Pool(args['cores_count'])
    target_path = args['target_path']
@@ -185,38 +188,33 @@ def start():
        print("\n[WARNING] No face detected in source image. Please try with another one.\n")
        return
    if is_img(target_path):
        if predict_image(target_path) > 0.7:
            quit()
        process_img(args['source_img'], target_path, args['output_file'])
        status("swap successful!")
        return
    seconds, probabilities = predict_video_frames(video_path=args['target_path'], frame_interval=100)
    if any(probability > 0.7 for probability in probabilities):
        quit()
    video_name_full = target_path.split("/")[-1]
    video_name = os.path.splitext(video_name_full)[0]
    output_dir = os.path.dirname(target_path) + "/" + video_name
    video_name = os.path.basename(target_path)
    video_name = os.path.splitext(video_name)[0]
    output_dir = os.path.join(os.path.dirname(target_path),video_name)
    Path(output_dir).mkdir(exist_ok=True)
    status("detecting video's FPS...")
    fps, exact_fps = detect_fps(target_path)
    fps = detect_fps(target_path)
    if not args['keep_fps'] and fps > 30:
        this_path = output_dir + "/" + video_name + ".mp4"
        set_fps(target_path, this_path, 30)
        target_path, exact_fps = this_path, 30
        target_path, fps = this_path, 30
    else:
        shutil.copy(target_path, output_dir)
    status("extracting frames...")
    extract_frames(target_path, output_dir)
    args['frame_paths'] = tuple(sorted(
        glob.glob(output_dir + "/*.png"),
        glob.glob(output_dir + f"/*.png"),
        key=lambda x: int(x.split(sep)[-1].replace(".png", ""))
    ))
    status("swapping in progress...")
    start_processing()
    status("creating video...")
    create_video(video_name, exact_fps, output_dir)
    create_video(video_name, fps, output_dir)
    status("adding audio...")
    add_audio(output_dir, target_path, video_name_full, args['keep_frames'], args['output_file'])
    add_audio(output_dir, target_path, args['keep_frames'], args['output_file'])
    save_path = args['output_file'] if args['output_file'] else output_dir + "/" + video_name + ".mp4"
    print("\n\nVideo saved as:", save_path, "\n\n")
    status("swap successful!")