import glob
import logging
import os
import subprocess

import cv2
import gradio as gr
import numpy as np

import AnimeGANv3_src
# Configure root logging once for the whole app: INFO level with timestamped messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class AnimeGANv3:
    """Video-to-anime converter built on the AnimeGANv3 model.

    Frames are read from the input video with OpenCV, stylized one at a
    time, written as numbered PNGs under ``frames/``, and reassembled
    into ``output/out.mp4`` with ffmpeg.
    """

    # Human-readable style names (as shown in the UI dropdown) -> the
    # single-letter codes expected by AnimeGANv3_src.Convert. Unknown
    # names fall back to "U" in inference().
    _STYLE_CODES = {
        "AnimeGANv3_Arcane": "A",
        "AnimeGANv3_Trump v1.0": "T",
        "AnimeGANv3_Shinkai": "S",
        "AnimeGANv3_PortraitSketch": "P",
        "AnimeGANv3_Hayao": "H",
        "AnimeGANv3_Disney v1.0": "D",
        "AnimeGANv3_JP_face v1.0": "J",
        "AnimeGANv3_Kpop v2.0": "K",
    }

    def __init__(self):
        # Ensure working directories exist before any frame is written.
        os.makedirs('output', exist_ok=True)
        os.makedirs('frames', exist_ok=True)

    def process_frame(self, frame, style_code, det_face):
        """Stylize a single frame with AnimeGANv3.

        Args:
            frame: OpenCV image (H x W x 3, BGR channel order — presumably
                uint8; confirm against AnimeGANv3_src.Convert's contract).
            style_code: single-letter style code (see _STYLE_CODES).
            det_face: whether the model should optimize for detected faces.

        Returns:
            The stylized frame in BGR order, ready for cv2.imwrite.
        """
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output = AnimeGANv3_src.Convert(frame_rgb, style_code, det_face)
        return output[:, :, ::-1]  # RGB -> BGR for OpenCV

    def inference(self, video_path, style, if_face=None):
        """Convert a whole video to anime style.

        Args:
            video_path: path to the input video file.
            style: one of the _STYLE_CODES keys (anything else maps to "U").
            if_face: "Yes" to enable face-optimized conversion; any other
                value (including None) disables it.

        Returns:
            Path to the rendered MP4 on success, or None on any failure.
            This is the Gradio handler boundary, so errors are logged
            (with traceback) rather than propagated to the UI.
        """
        logging.info("Starting inference: video=%s, style=%s, face_detection=%s",
                     video_path, style, if_face)
        cap = None
        try:
            style_code = self._STYLE_CODES.get(style, "U")
            det_face = if_face == "Yes"

            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise RuntimeError("Could not open video file")
            # Fall back to a sane default when the container lacks FPS metadata
            # (cap.get returns 0.0 in that case).
            fps = cap.get(cv2.CAP_PROP_FPS) or 25.0

            # Remove stale PNGs from a previous run; otherwise a shorter
            # video would silently pick up old frames when ffmpeg globs
            # frames/frame_%04d.png.
            for old_png in glob.glob('frames/frame_*.png'):
                os.remove(old_png)

            # Stream frames one at a time instead of buffering the whole
            # video in memory (CAP_PROP_FRAME_COUNT is unreliable metadata,
            # so count what we actually read).
            frame_count = 0
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                stylized = self.process_frame(frame, style_code, det_face)
                png_filename = f'frames/frame_{frame_count:04d}.png'
                cv2.imwrite(png_filename, stylized)
                frame_count += 1
                logging.info("Processed and saved frame %d as %s", frame_count, png_filename)

            if frame_count == 0:
                raise RuntimeError("No frames could be read from the video")
            logging.info("All %d frames processed and saved as PNGs", frame_count)

            # Reassemble the PNGs into an H.264 MP4. Use an argument list
            # (no shell) to avoid injection via paths, check the exit code,
            # and put -y before the output path so ffmpeg overwrites it.
            save_path = "output/out.mp4"
            subprocess.run(
                ["ffmpeg", "-y",
                 "-framerate", str(fps),
                 "-i", "frames/frame_%04d.png",
                 "-c:v", "libx264", "-pix_fmt", "yuv420p",
                 save_path],
                check=True,
            )
            if not os.path.exists(save_path):
                raise RuntimeError("Failed to create output video with ffmpeg")
            logging.info("Video created: %s", save_path)
            return save_path
        except Exception:
            # Boundary handler: log the full traceback, return None so the
            # Gradio UI shows an empty output instead of crashing.
            logging.exception("Video conversion failed")
            return None
        finally:
            # Release the capture even when an error interrupts extraction.
            if cap is not None:
                cap.release()
# Single converter instance shared by all Gradio requests.
anime_gan = AnimeGANv3()

# Page copy shown above the interface.
title = "AnimeGANv3: Video to Anime Converter"
description = r"""Upload a video to convert it into anime style using AnimeGANv3.
Select a style and choose whether to optimize for faces.
AnimeGANv3 GitHub |
Patreon"""

# Available model styles, in the order they appear in the dropdown.
style_choices = [
    'AnimeGANv3_Hayao',
    'AnimeGANv3_Shinkai',
    'AnimeGANv3_Arcane',
    'AnimeGANv3_Trump v1.0',
    'AnimeGANv3_Disney v1.0',
    'AnimeGANv3_PortraitSketch',
    'AnimeGANv3_JP_face v1.0',
    'AnimeGANv3_Kpop v2.0',
]

# Build the three input widgets separately for readability.
video_input = gr.Video(label="Input Video")
style_input = gr.Dropdown(choices=style_choices, label='AnimeGANv3 Style', value='AnimeGANv3_Arcane')
face_input = gr.Radio(choices=["Yes", "No"], label='Extract face', value="No")

iface = gr.Interface(
    fn=anime_gan.inference,
    inputs=[video_input, style_input, face_input],
    outputs=[gr.Video(label="Output Video")],
    title=title,
    description=description,
    allow_flagging="never",
)

# Start the web UI.
iface.launch()