File size: 2,044 Bytes
fa2c889
 
 
 
 
717b40c
2ba9149
1e81919
fa2c889
 
 
 
006a785
fa2c889
 
 
717b40c
 
 
fa2c889
 
 
 
 
1e81919
 
 
 
 
 
 
 
 
fa2c889
fd8a3d3
fa2c889
 
fd8a3d3
1e81919
fd8a3d3
fa2c889
1e81919
 
 
 
 
fa2c889
 
 
 
 
 
 
 
717b40c
fa2c889
 
 
 
 
 
717b40c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import gradio as gr
import cv2
import numpy as np
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image
from PIL import Image  # Add this import
 
# Load the anime-style diffusion model once at module import time so every
# request shares a single pipeline instance.
# NOTE(review): safety_checker=None disables NSFW filtering — confirm this is
# intentional for the deployment context.
pipe = AutoPipelineForImage2Image.from_pretrained(
    "nitrosocke/Arcane-Diffusion",
    safety_checker=None,
)
# Running on CPU by default (no .to("cuda")) — per-frame inference will be slow.

# Function to process a single frame
def process_frame(frame, prompt):
    """Stylize one video frame with the image-to-image diffusion pipeline.

    Args:
        frame: A single frame as a BGR NumPy array (as produced by
            ``cv2.VideoCapture.read``).
        prompt: Text prompt guiding the stylization.

    Returns:
        The stylized frame as an RGB ``numpy.ndarray``.
    """
    # OpenCV delivers BGR; the diffusion pipeline expects RGB.
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pil_image = Image.fromarray(frame_rgb)
    # Fix: the previous load_image(pil_image) round-trip was redundant — the
    # image is already an RGB PIL image, so it can go straight to the pipeline.
    result = pipe(prompt=prompt, image=pil_image, strength=0.75).images[0]
    return np.array(result)

# Function to convert the entire video
def video_to_anime(video_path, prompt="Arcane style"):
    """Convert every frame of a video to anime style and write an MP4.

    Args:
        video_path: Path to the input video file.
        prompt: Style prompt passed to the diffusion pipeline for each frame.

    Returns:
        Path to the written output video ("output.mp4").

    Raises:
        ValueError: If the video cannot be opened or contains no readable
            frames (previously this crashed with an IndexError).
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")
    # Some containers report 0.0 fps; fall back to a sane default so the
    # VideoWriter produces a playable file.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()

    if not frames:
        raise ValueError(f"No frames could be read from: {video_path}")

    # Process each frame (RGB arrays come back from process_frame)
    processed_frames = [process_frame(frame, prompt) for frame in frames]

    # Write the output video; frame size comes from the first processed frame
    # (the pipeline may resize, so don't trust the input dimensions).
    height, width, _ = processed_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_path = "output.mp4"
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    try:
        for frame in processed_frames:
            # VideoWriter expects BGR, so convert back before writing.
            out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    finally:
        # Release the writer even if a conversion/write fails mid-stream.
        out.release()

    return output_path

# Create the Gradio interface: one video input plus an editable style prompt,
# one video output. The default prompt matches the Arcane-Diffusion model's
# trigger phrase.
iface = gr.Interface(
    fn=video_to_anime,
    inputs=[
        gr.Video(label="Input Video"),
        gr.Textbox(label="Style Prompt", value="Arcane style")
    ],
    outputs=gr.Video(label="Output Video"),
    title="Video to Anime Converter",
    description="Upload a video and convert it to anime style!"
)

# Launch the interface with a public link
# NOTE(review): share=True exposes the app via a public gradio.live URL —
# confirm that is intended before deploying.
iface.launch(share=True)  # Added share=True as per the suggestion