Merge pull request #299 from s0md3v/next

Next
Henry Ruhs 2023-06-02 21:39:48 +02:00 committed by GitHub
commit 25a0768b08
8 changed files with 306 additions and 290 deletions

README.md

@@ -47,6 +47,7 @@ options:
                        maximum amount of RAM in GB to be used
  --max-cores CORES_COUNT
                        number of cores to be used for CPU mode
+ --all-faces           swap all faces in frame
```
Looking for a CLI mode? Using the -f/--face argument will run the program in CLI mode.
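For example, a typical headless run looks like this (the file names are placeholders, not part of this diff):

```
python run.py -f my_face.jpg -t input.mp4 -o swapped.mp4 --keep-fps --max-cores 4
```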

roop/analyser.py

@@ -1,5 +1,5 @@
import insightface
-import core.globals
+import roop.globals

FACE_ANALYSER = None

@@ -7,7 +7,7 @@ FACE_ANALYSER = None
def get_face_analyser():
    global FACE_ANALYSER
    if FACE_ANALYSER is None:
-        FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=core.globals.providers)
+        FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.providers)
        FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
    return FACE_ANALYSER
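For orientation, the cached analyser is consumed roughly like this by the helpers further down in roop/analyser.py (a minimal sketch; the image path is a placeholder):

```python
import cv2
from roop.analyser import get_face_analyser

frame = cv2.imread('face.jpg')          # placeholder input image
faces = get_face_analyser().get(frame)  # insightface detection + embeddings
print(len(faces), 'face(s) detected')
```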

roop/core.py (new executable file, 292 lines)

@@ -0,0 +1,292 @@
#!/usr/bin/env python3
import platform
import signal
import sys
import shutil
import glob
import argparse
import multiprocessing as mp
import os
import torch
from pathlib import Path
import tkinter as tk
from tkinter import filedialog
from opennsfw2 import predict_video_frames, predict_image
from tkinter.filedialog import asksaveasfilename
import webbrowser
import psutil
import cv2
import threading
from PIL import Image, ImageTk
import roop.globals
from roop.swapper import process_video, process_img
from roop.utils import is_img, detect_fps, set_fps, create_video, add_audio, extract_frames, rreplace
from roop.analyser import get_face_single
if 'ROCMExecutionProvider' in roop.globals.providers:
    del torch
pool = None
args = {}
signal.signal(signal.SIGINT, lambda signal_number, frame: quit())
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--face', help='use this face', dest='source_img')
parser.add_argument('-t', '--target', help='replace this face', dest='target_path')
parser.add_argument('-o', '--output', help='save output to this file', dest='output_file')
parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False)
parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False)
parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False)
parser.add_argument('--max-memory', help='maximum amount of RAM in GB to be used', type=int)
parser.add_argument('--max-cores', help='number of cores to be used for CPU mode', dest='cores_count', type=int, default=max(psutil.cpu_count() - 2, 2))
parser.add_argument('--all-faces', help='swap all faces in frame', dest='all_faces', action='store_true', default=False)
for name, value in vars(parser.parse_args()).items():
    args[name] = value

if '--all-faces' in sys.argv or '-a' in sys.argv:
    roop.globals.all_faces = True

sep = "/"
if os.name == "nt":
    sep = "\\"
def limit_resources():
    if args['max_memory']:
        memory = args['max_memory'] * 1024 * 1024 * 1024
        if str(platform.system()).lower() == 'windows':
            import ctypes
            kernel32 = ctypes.windll.kernel32
            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
        else:
            import resource
            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
def pre_check():
    if sys.version_info < (3, 9):
        quit('Python version is not supported - please upgrade to 3.9 or higher')
    if not shutil.which('ffmpeg'):
        quit('ffmpeg is not installed!')
    model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx')
    if not os.path.isfile(model_path):
        quit('File "inswapper_128.onnx" does not exist!')
    if '--gpu' in sys.argv:
        NVIDIA_PROVIDERS = ['CUDAExecutionProvider', 'TensorrtExecutionProvider']
        if len(list(set(roop.globals.providers) - set(NVIDIA_PROVIDERS))) == 1:
            CUDA_VERSION = torch.version.cuda
            CUDNN_VERSION = torch.backends.cudnn.version()
            if not torch.cuda.is_available() or not CUDA_VERSION:
                quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.")
            if CUDA_VERSION > '11.8':
                quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8")
            if CUDA_VERSION < '11.4':
                quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8")
            if CUDNN_VERSION < 8220:
                quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1")
            if CUDNN_VERSION > 8910:
                quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1")
    else:
        roop.globals.providers = ['CPUExecutionProvider']
    if '--all-faces' in sys.argv or '-a' in sys.argv:
        roop.globals.all_faces = True
def start_processing():
    frame_paths = args["frame_paths"]
    n = len(frame_paths) // (args['cores_count'])
    # single thread
    if args['gpu'] or n < 2:
        process_video(args['source_img'], args["frame_paths"])
        return
    # multithread if total frames to cpu cores ratio is greater than 2
    if n > 2:
        processes = []
        for i in range(0, len(frame_paths), n):
            p = pool.apply_async(process_video, args=(args['source_img'], frame_paths[i:i+n],))
            processes.append(p)
        for p in processes:
            p.get()
        pool.close()
        pool.join()
def preview_image(image_path):
    img = Image.open(image_path)
    img = img.resize((180, 180), Image.ANTIALIAS)
    photo_img = ImageTk.PhotoImage(img)
    left_frame = tk.Frame(window)
    left_frame.place(x=60, y=100)
    img_label = tk.Label(left_frame, image=photo_img)
    img_label.image = photo_img
    img_label.pack()


def preview_video(video_path):
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("Error opening video file")
        return
    ret, frame = cap.read()
    if ret:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(frame)
        img = img.resize((180, 180), Image.ANTIALIAS)
        photo_img = ImageTk.PhotoImage(img)
        right_frame = tk.Frame(window)
        right_frame.place(x=360, y=100)
        img_label = tk.Label(right_frame, image=photo_img)
        img_label.image = photo_img
        img_label.pack()
    cap.release()
def select_face():
    args['source_img'] = filedialog.askopenfilename(title="Select a face")
    preview_image(args['source_img'])


def select_target():
    args['target_path'] = filedialog.askopenfilename(title="Select a target")
    threading.Thread(target=preview_video, args=(args['target_path'],)).start()


def toggle_fps_limit():
    args['keep_fps'] = int(limit_fps.get() != True)


def toggle_all_faces():
    roop.globals.all_faces = True if all_faces.get() == 1 else False


def toggle_keep_frames():
    args['keep_frames'] = int(keep_frames.get())


def save_file():
    filename, ext = 'output.mp4', '.mp4'
    if is_img(args['target_path']):
        filename, ext = 'output.png', '.png'
    args['output_file'] = asksaveasfilename(initialfile=filename, defaultextension=ext, filetypes=[("All Files","*.*"),("Videos","*.mp4")])


def status(string):
    if 'cli_mode' in args:
        print("Status: " + string)
    else:
        status_label["text"] = "Status: " + string
        window.update()
def start():
    if not args['source_img'] or not os.path.isfile(args['source_img']):
        print("\n[WARNING] Please select an image containing a face.")
        return
    elif not args['target_path'] or not os.path.isfile(args['target_path']):
        print("\n[WARNING] Please select a video/image to swap face in.")
        return
    if not args['output_file']:
        target_path = args['target_path']
        args['output_file'] = rreplace(target_path, "/", "/swapped-", 1) if "/" in target_path else "swapped-" + target_path
    global pool
    pool = mp.Pool(args['cores_count'])
    target_path = args['target_path']
    test_face = get_face_single(cv2.imread(args['source_img']))
    if not test_face:
        print("\n[WARNING] No face detected in source image. Please try with another one.\n")
        return
    if is_img(target_path):
        # opennsfw2 check: bail out if the target image is likely NSFW
        if predict_image(target_path) > 0.85:
            quit()
        process_img(args['source_img'], target_path, args['output_file'])
        status("swap successful!")
        return
    # opennsfw2 check on sampled video frames: bail out if the target is likely NSFW
    seconds, probabilities = predict_video_frames(video_path=args['target_path'], frame_interval=100)
    if any(probability > 0.85 for probability in probabilities):
        quit()
    video_name_full = target_path.split("/")[-1]
    video_name = os.path.splitext(video_name_full)[0]
    output_dir = os.path.dirname(target_path) + "/" + video_name
    Path(output_dir).mkdir(exist_ok=True)
    status("detecting video's FPS...")
    fps, exact_fps = detect_fps(target_path)
    if not args['keep_fps'] and fps > 30:
        this_path = output_dir + "/" + video_name + ".mp4"
        set_fps(target_path, this_path, 30)
        target_path, exact_fps = this_path, 30
    else:
        shutil.copy(target_path, output_dir)
    status("extracting frames...")
    extract_frames(target_path, output_dir)
    args['frame_paths'] = tuple(sorted(
        glob.glob(output_dir + "/*.png"),
        key=lambda x: int(x.split(sep)[-1].replace(".png", ""))
    ))
    status("swapping in progress...")
    start_processing()
    status("creating video...")
    create_video(video_name, exact_fps, output_dir)
    status("adding audio...")
    add_audio(output_dir, target_path, video_name_full, args['keep_frames'], args['output_file'])
    save_path = args['output_file'] if args['output_file'] else output_dir + "/" + video_name + ".mp4"
    print("\n\nVideo saved as:", save_path, "\n\n")
    status("swap successful!")
def run():
    global all_faces, keep_frames, limit_fps, status_label, window

    pre_check()
    limit_resources()
    if args['source_img']:
        args['cli_mode'] = True
        start()
        quit()
    window = tk.Tk()
    window.geometry("600x700")
    window.title("roop")
    window.configure(bg="#2d3436")
    window.resizable(width=False, height=False)

    # Contact information
    support_link = tk.Label(window, text="Donate to project <3", fg="#fd79a8", bg="#2d3436", cursor="hand2", font=("Arial", 8))
    support_link.place(x=180,y=20,width=250,height=30)
    support_link.bind("<Button-1>", lambda e: webbrowser.open("https://github.com/sponsors/s0md3v"))

    # Select a face button
    face_button = tk.Button(window, text="Select a face", command=select_face, bg="#2d3436", fg="#74b9ff", highlightthickness=4, relief="flat", highlightbackground="#74b9ff", activebackground="#74b9ff", borderwidth=4)
    face_button.place(x=60,y=320,width=180,height=80)

    # Select a target button
    target_button = tk.Button(window, text="Select a target", command=select_target, bg="#2d3436", fg="#74b9ff", highlightthickness=4, relief="flat", highlightbackground="#74b9ff", activebackground="#74b9ff", borderwidth=4)
    target_button.place(x=360,y=320,width=180,height=80)

    # All faces checkbox
    all_faces = tk.IntVar()
    all_faces_checkbox = tk.Checkbutton(window, anchor="w", relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Process all faces in frame", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=all_faces, command=toggle_all_faces)
    all_faces_checkbox.place(x=60,y=500,width=240,height=31)

    # FPS limit checkbox
    limit_fps = tk.IntVar(None, not args['keep_fps'])
    fps_checkbox = tk.Checkbutton(window, anchor="w", relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Limit FPS to 30", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=limit_fps, command=toggle_fps_limit)
    fps_checkbox.place(x=60,y=475,width=240,height=31)

    # Keep frames checkbox
    keep_frames = tk.IntVar(None, args['keep_frames'])
    frames_checkbox = tk.Checkbutton(window, anchor="w", relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Keep frames dir", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=keep_frames, command=toggle_keep_frames)
    frames_checkbox.place(x=60,y=450,width=240,height=31)

    # Start button
    start_button = tk.Button(window, text="Start", bg="#f1c40f", relief="flat", borderwidth=0, highlightthickness=0, command=lambda: [save_file(), start()])
    start_button.place(x=240,y=560,width=120,height=49)

    # Status label
    status_label = tk.Label(window, width=580, justify="center", text="Status: waiting for input...", fg="#2ecc71", bg="#2d3436")
    status_label.place(x=10,y=640,width=580,height=30)

    window.mainloop()

roop/swapper.py

@@ -2,8 +2,8 @@ import os
from tqdm import tqdm
import cv2
import insightface
-import core.globals
-from core.analyser import get_face_single, get_face_many
+import roop.globals
+from roop.analyser import get_face_single, get_face_many

FACE_SWAPPER = None

@@ -12,7 +12,7 @@ def get_face_swapper():
    global FACE_SWAPPER
    if FACE_SWAPPER is None:
        model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../inswapper_128.onnx')
-        FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=core.globals.providers)
+        FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.providers)
    return FACE_SWAPPER

@@ -49,7 +49,7 @@ def process_video(source_img, frame_paths):
    for frame_path in frame_paths:
        frame = cv2.imread(frame_path)
        try:
-            result = process_faces(source_face, frame, progress, core.globals.all_faces)
+            result = process_faces(source_face, frame, progress, roop.globals.all_faces)
            cv2.imwrite(frame_path, result)
        except Exception:
            progress.set_postfix(status='E', refresh=True)

roop/utils.py

@@ -31,24 +31,24 @@ def detect_fps(input_path):

def set_fps(input_path, output_path, fps):
    input_path, output_path = path(input_path), path(output_path)
-    os.system(f'ffmpeg -i "{input_path}" -filter:v fps=fps={fps} "{output_path}"')
+    os.system(f'ffmpeg -i "{input_path}" -filter:v fps=fps={fps} "{output_path}" -loglevel error')


def create_video(video_name, fps, output_dir):
    output_dir = path(output_dir)
-    os.system(f'ffmpeg -framerate "{fps}" -i "{output_dir}{sep}%04d.png" -c:v libx264 -crf 7 -pix_fmt yuv420p -y "{output_dir}{sep}output.mp4"')
+    os.system(f'ffmpeg -framerate "{fps}" -i "{output_dir}{sep}%04d.png" -c:v libx264 -crf 7 -pix_fmt yuv420p -y "{output_dir}{sep}output.mp4" -loglevel error')


def extract_frames(input_path, output_dir):
    input_path, output_dir = path(input_path), path(output_dir)
-    os.system(f'ffmpeg -i "{input_path}" "{output_dir}{sep}%04d.png"')
+    os.system(f'ffmpeg -i "{input_path}" "{output_dir}{sep}%04d.png" -loglevel error')


def add_audio(output_dir, target_path, video, keep_frames, output_file):
    video_name = os.path.splitext(video)[0]
    save_to = output_file if output_file else output_dir + "/swapped-" + video_name + ".mp4"
    save_to_ff, output_dir_ff = path(save_to), path(output_dir)
-    os.system(f'ffmpeg -i "{output_dir_ff}{sep}output.mp4" -i "{output_dir_ff}{sep}{video}" -c:v copy -map 0:v:0 -map 1:a:0 -y "{save_to_ff}"')
+    os.system(f'ffmpeg -i "{output_dir_ff}{sep}output.mp4" -i "{output_dir_ff}{sep}{video}" -c:v copy -map 0:v:0 -map 1:a:0 -y "{save_to_ff}" -loglevel error')
    if not os.path.isfile(save_to):
        shutil.move(output_dir + "/output.mp4", save_to)
    if not keep_frames:
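Read alongside start() in roop/core.py above, these helpers make up the video half of the pipeline. A condensed sketch of the call order (paths and names below are placeholders, not part of the commit):

```python
from roop.utils import detect_fps, set_fps, extract_frames, create_video, add_audio

target = '/videos/clip.mp4'                    # placeholder target video
workdir = '/videos/clip'                       # frame/output directory created by start()

fps, exact_fps = detect_fps(target)
if fps > 30:                                   # core.start() caps FPS unless --keep-fps is set
    set_fps(target, workdir + '/clip.mp4', 30)
    target, exact_fps = workdir + '/clip.mp4', 30
extract_frames(target, workdir)                # dumps %04d.png frames into workdir
# ... faces are swapped on the extracted frames here ...
create_video('clip', exact_fps, workdir)       # re-encodes the frames to workdir/output.mp4
add_audio(workdir, target, 'clip.mp4', False, '/videos/swapped-clip.mp4')
```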

run.py

@@ -1,283 +1,6 @@
#!/usr/bin/env python3
-import platform
+from roop import core
-import signal
-import sys
-import shutil
-import glob
-import argparse
-import multiprocessing as mp
-import os
-import torch
-from pathlib import Path
-import tkinter as tk
-from tkinter import filedialog
-from opennsfw2 import predict_video_frames, predict_image
-from tkinter.filedialog import asksaveasfilename
-import webbrowser
-import psutil
-import cv2
-import threading
-from PIL import Image, ImageTk
-import core.globals
-from core.swapper import process_video, process_img
-from core.utils import is_img, detect_fps, set_fps, create_video, add_audio, extract_frames, rreplace
-from core.analyser import get_face_single
-if 'ROCMExecutionProvider' in core.globals.providers:
-    del torch
+if __name__ == '__main__':
+    core.run()
-pool = None
-args = {}
-signal.signal(signal.SIGINT, lambda signal_number, frame: quit())
-parser = argparse.ArgumentParser()
-parser.add_argument('-f', '--face', help='use this face', dest='source_img')
-parser.add_argument('-t', '--target', help='replace this face', dest='target_path')
-parser.add_argument('-o', '--output', help='save output to this file', dest='output_file')
-parser.add_argument('--gpu', help='use gpu', dest='gpu', action='store_true', default=False)
-parser.add_argument('--keep-fps', help='maintain original fps', dest='keep_fps', action='store_true', default=False)
-parser.add_argument('--keep-frames', help='keep frames directory', dest='keep_frames', action='store_true', default=False)
-parser.add_argument('--max-memory', help='maximum amount of RAM in GB to be used', type=int)
-parser.add_argument('--max-cores', help='number of cores to be use for CPU mode', dest='cores_count', type=int, default=max(psutil.cpu_count() - 2, 2))
-parser.add_argument('--all-faces', help='swap all faces in frame', dest='all_faces', action='store_true', default=False)
-for name, value in vars(parser.parse_args()).items():
-    args[name] = value
-sep = "/"
-if os.name == "nt":
-    sep = "\\"
-def limit_resources():
-    if args['max_memory']:
-        memory = args['max_memory'] * 1024 * 1024 * 1024
-        if str(platform.system()).lower() == 'windows':
-            import ctypes
-            kernel32 = ctypes.windll.kernel32
-            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-        else:
-            import resource
-            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
-def pre_check():
-    if sys.version_info < (3, 9):
-        quit('Python version is not supported - please upgrade to 3.9 or higher')
-    if not shutil.which('ffmpeg'):
-        quit('ffmpeg is not installed!')
-    model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'inswapper_128.onnx')
-    if not os.path.isfile(model_path):
-        quit('File "inswapper_128.onnx" does not exist!')
-    if '--gpu' in sys.argv:
-        NVIDIA_PROVIDERS = ['CUDAExecutionProvider', 'TensorrtExecutionProvider']
-        if len(list(set(core.globals.providers) - set(NVIDIA_PROVIDERS))) == 1:
-            CUDA_VERSION = torch.version.cuda
-            CUDNN_VERSION = torch.backends.cudnn.version()
-            if not torch.cuda.is_available() or not CUDA_VERSION:
-                quit("You are using --gpu flag but CUDA isn't available or properly installed on your system.")
-            if CUDA_VERSION > '11.8':
-                quit(f"CUDA version {CUDA_VERSION} is not supported - please downgrade to 11.8")
-            if CUDA_VERSION < '11.4':
-                quit(f"CUDA version {CUDA_VERSION} is not supported - please upgrade to 11.8")
-            if CUDNN_VERSION < 8220:
-                quit(f"CUDNN version {CUDNN_VERSION} is not supported - please upgrade to 8.9.1")
-            if CUDNN_VERSION > 8910:
-                quit(f"CUDNN version {CUDNN_VERSION} is not supported - please downgrade to 8.9.1")
-    else:
-        core.globals.providers = ['CPUExecutionProvider']
-def start_processing():
-    if args['gpu']:
-        process_video(args['source_img'], args["frame_paths"])
-        return
-    frame_paths = args["frame_paths"]
-    n = len(frame_paths)//(args['cores_count'])
-    processes = []
-    for i in range(0, len(frame_paths), n):
-        p = pool.apply_async(process_video, args=(args['source_img'], frame_paths[i:i+n],))
-        processes.append(p)
-    for p in processes:
-        p.get()
-    pool.close()
-    pool.join()
-def preview_image(image_path):
-    img = Image.open(image_path)
-    img = img.resize((180, 180), Image.ANTIALIAS)
-    photo_img = ImageTk.PhotoImage(img)
-    left_frame = tk.Frame(window)
-    left_frame.place(x=60, y=100)
-    img_label = tk.Label(left_frame, image=photo_img)
-    img_label.image = photo_img
-    img_label.pack()
-def preview_video(video_path):
-    cap = cv2.VideoCapture(video_path)
-    if not cap.isOpened():
-        print("Error opening video file")
-        return
-    ret, frame = cap.read()
-    if ret:
-        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        img = Image.fromarray(frame)
-        img = img.resize((180, 180), Image.ANTIALIAS)
-        photo_img = ImageTk.PhotoImage(img)
-        right_frame = tk.Frame(window)
-        right_frame.place(x=360, y=100)
-        img_label = tk.Label(right_frame, image=photo_img)
-        img_label.image = photo_img
-        img_label.pack()
-    cap.release()
-def select_face():
-    args['source_img'] = filedialog.askopenfilename(title="Select a face")
-    preview_image(args['source_img'])
-def select_target():
-    args['target_path'] = filedialog.askopenfilename(title="Select a target")
-    threading.Thread(target=preview_video, args=(args['target_path'],)).start()
-def toggle_fps_limit():
-    args['keep_fps'] = int(limit_fps.get() != True)
-def toggle_all_faces():
-    core.globals.all_faces = True if all_faces.get() == 1 else False
-def toggle_keep_frames():
-    args['keep_frames'] = int(keep_frames.get())
-def save_file():
-    filename, ext = 'output.mp4', '.mp4'
-    if is_img(args['target_path']):
-        filename, ext = 'output.png', '.png'
-    args['output_file'] = asksaveasfilename(initialfile=filename, defaultextension=ext, filetypes=[("All Files","*.*"),("Videos","*.mp4")])
-def status(string):
-    if 'cli_mode' in args:
-        print("Status: " + string)
-    else:
-        status_label["text"] = "Status: " + string
-        window.update()
-def start():
-    if not args['source_img'] or not os.path.isfile(args['source_img']):
-        print("\n[WARNING] Please select an image containing a face.")
-        return
-    elif not args['target_path'] or not os.path.isfile(args['target_path']):
-        print("\n[WARNING] Please select a video/image to swap face in.")
-        return
-    if not args['output_file']:
-        target_path = args['target_path']
-        args['output_file'] = rreplace(target_path, "/", "/swapped-", 1) if "/" in target_path else "swapped-" + target_path
-    global pool
-    pool = mp.Pool(args['cores_count'])
-    target_path = args['target_path']
-    test_face = get_face_single(cv2.imread(args['source_img']))
-    if not test_face:
-        print("\n[WARNING] No face detected in source image. Please try with another one.\n")
-        return
-    if is_img(target_path):
-        if predict_image(target_path) > 0.85:
-            quit()
-        process_img(args['source_img'], target_path, args['output_file'])
-        status("swap successful!")
-        return
-    seconds, probabilities = predict_video_frames(video_path=args['target_path'], frame_interval=100)
-    if any(probability > 0.85 for probability in probabilities):
-        quit()
-    video_name_full = target_path.split("/")[-1]
-    video_name = os.path.splitext(video_name_full)[0]
-    output_dir = os.path.dirname(target_path) + "/" + video_name if os.path.dirname(target_path) else video_name
-    Path(output_dir).mkdir(exist_ok=True)
-    status("detecting video's FPS...")
-    fps, exact_fps = detect_fps(target_path)
-    if not args['keep_fps'] and fps > 30:
-        this_path = output_dir + "/" + video_name + ".mp4"
-        set_fps(target_path, this_path, 30)
-        target_path, exact_fps = this_path, 30
-    else:
-        shutil.copy(target_path, output_dir)
-    status("extracting frames...")
-    extract_frames(target_path, output_dir)
-    args['frame_paths'] = tuple(sorted(
-        glob.glob(output_dir + "/*.png"),
-        key=lambda x: int(x.split(sep)[-1].replace(".png", ""))
-    ))
-    status("swapping in progress...")
-    start_processing()
-    status("creating video...")
-    create_video(video_name, exact_fps, output_dir)
-    status("adding audio...")
-    add_audio(output_dir, target_path, video_name_full, args['keep_frames'], args['output_file'])
-    save_path = args['output_file'] if args['output_file'] else output_dir + "/" + video_name + ".mp4"
-    print("\n\nVideo saved as:", save_path, "\n\n")
-    status("swap successful!")
-if __name__ == "__main__":
-    global status_label, window
-    pre_check()
-    limit_resources()
-    if args['source_img']:
-        args['cli_mode'] = True
-        start()
-        quit()
-    window = tk.Tk()
-    window.geometry("600x700")
-    window.title("roop")
-    window.configure(bg="#2d3436")
-    window.resizable(width=False, height=False)
-    # Contact information
-    support_link = tk.Label(window, text="Donate to project <3", fg="#fd79a8", bg="#2d3436", cursor="hand2", font=("Arial", 8))
-    support_link.place(x=180,y=20,width=250,height=30)
-    support_link.bind("<Button-1>", lambda e: webbrowser.open("https://github.com/sponsors/s0md3v"))
-    # Select a face button
-    face_button = tk.Button(window, text="Select a face", command=select_face, bg="#2d3436", fg="#74b9ff", highlightthickness=4, relief="flat", highlightbackground="#74b9ff", activebackground="#74b9ff", borderwidth=4)
-    face_button.place(x=60,y=320,width=180,height=80)
-    # Select a target button
-    target_button = tk.Button(window, text="Select a target", command=select_target, bg="#2d3436", fg="#74b9ff", highlightthickness=4, relief="flat", highlightbackground="#74b9ff", activebackground="#74b9ff", borderwidth=4)
-    target_button.place(x=360,y=320,width=180,height=80)
-    # All faces checkbox
-    all_faces = tk.IntVar()
-    all_faces_checkbox = tk.Checkbutton(window, anchor="w", relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Process all faces in frame", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=all_faces, command=toggle_all_faces)
-    all_faces_checkbox.place(x=60,y=500,width=240,height=31)
-    # FPS limit checkbox
-    limit_fps = tk.IntVar(None, not args['keep_fps'])
-    fps_checkbox = tk.Checkbutton(window, anchor="w", relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Limit FPS to 30", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=limit_fps, command=toggle_fps_limit)
-    fps_checkbox.place(x=60,y=475,width=240,height=31)
-    # Keep frames checkbox
-    keep_frames = tk.IntVar(None, args['keep_frames'])
-    frames_checkbox = tk.Checkbutton(window, anchor="w", relief="groove", activebackground="#2d3436", activeforeground="#74b9ff", selectcolor="black", text="Keep frames dir", fg="#dfe6e9", borderwidth=0, highlightthickness=0, bg="#2d3436", variable=keep_frames, command=toggle_keep_frames)
-    frames_checkbox.place(x=60,y=450,width=240,height=31)
-    # Start button
-    start_button = tk.Button(window, text="Start", bg="#f1c40f", relief="flat", borderwidth=0, highlightthickness=0, command=lambda: [save_file(), start()])
-    start_button.place(x=240,y=560,width=120,height=49)
-    # Status label
-    status_label = tk.Label(window, width=580, justify="center", text="Status: waiting for input...", fg="#2ecc71", bg="#2d3436")
-    status_label.place(x=10,y=640,width=580,height=30)
-    window.mainloop()