import gradio as gr
import numpy as np
import torch
from moviepy.editor import VideoFileClip, ImageSequenceClip
from PIL import Image
from diffusers import AutoPipelineForImage2Image
# Load the anime-style model (fp16 halves GPU memory use)
pipe = AutoPipelineForImage2Image.from_pretrained(
    "nitrosocke/Arcane-Diffusion",
    torch_dtype=torch.float16,
    safety_checker=None,
)
pipe.to("cuda")
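# Optional: on GPUs with limited VRAM, attention slicing trades a little
# speed for lower peak memory (a standard diffusers option, not part of
# the original script):
#   pipe.enable_attention_slicing()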
# Process a single frame through the diffusion pipeline
def process_frame(frame, prompt):
    # moviepy yields RGB numpy arrays, so no BGR->RGB conversion is needed;
    # wrap the array in a PIL image, which is what the pipeline expects
    image = Image.fromarray(frame)
    # Apply the anime-style transformation (strength controls how far the
    # output may drift from the source frame)
    result = pipe(prompt=prompt, image=image, strength=0.75).images[0]
    return np.array(result)
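# Note: Stable Diffusion pipelines generally expect width and height
# divisible by 8. If your source video has an odd size, a guard such as
#   w, h = image.size
#   image = image.resize((w - w % 8, h - h % 8))
# before inference may be needed (a hedged addition, not in the original).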
# Convert an entire video, frame by frame
def video_to_anime(video_path, prompt="Arcane style"):
    # Load the video and extract its frames as RGB arrays
    clip = VideoFileClip(video_path)
    frames = list(clip.iter_frames())
    # Run every frame through the anime-style model
    processed_frames = [process_frame(frame, prompt) for frame in frames]
    # Reassemble the processed frames into a video at the original fps
    new_clip = ImageSequenceClip(processed_frames, fps=clip.fps)
    output_path = "output.mp4"
    new_clip.write_videofile(output_path, codec="libx264")
    return output_path
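# Note: ImageSequenceClip produces a silent clip, so the source audio is
# dropped. A minimal fix, assuming the input has an audio track, is to
# reattach it before writing (moviepy 1.x API):
#   new_clip = new_clip.set_audio(clip.audio)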
# Create the Gradio interface
iface = gr.Interface(
    fn=video_to_anime,
    inputs=[
        gr.Video(label="Input Video"),
        gr.Textbox(label="Style Prompt", value="Arcane style"),
    ],
    outputs=gr.Video(label="Output Video"),
    title="Video to Anime Converter",
    description="Upload a video and convert it to anime style!",
)
# Launch the interface
iface.launch()
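# To run locally: python app.py — Gradio serves the UI at
# http://localhost:7860 by default. A CUDA GPU is required, since the
# pipeline is moved to "cuda" above.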