SpiderReddy committed · verified
Commit 1e81919 · 1 Parent(s): fa2c889

Update app.py

Files changed (1)
  1. app.py +26 -13
app.py CHANGED
@@ -1,11 +1,10 @@
 import gradio as gr
 import cv2
-from moviepy.editor import VideoFileClip, ImageSequenceClip
 import numpy as np
 from diffusers import AutoPipelineForImage2Image
 from diffusers.utils import load_image
 
-# Load the anime-style model
+# Load the anime-style diffusion model
 pipe = AutoPipelineForImage2Image.from_pretrained(
     "nitrosocke/Arcane-Diffusion",
     safety_checker=None,
@@ -14,27 +13,41 @@ pipe.to("cuda")
 
 # Function to process a single frame
 def process_frame(frame, prompt):
-    # Convert frame from BGR (OpenCV) to RGB
-    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-    # Load the frame as an image for the model
-    image = load_image(frame)
+    # Convert frame from BGR (OpenCV format) to RGB (expected by the model)
+    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    # Load the frame as an image for the diffusion model
+    image = load_image(frame_rgb)
     # Apply the anime-style transformation
     result = pipe(prompt=prompt, image=image, strength=0.75).images[0]
+    # Convert back to numpy array
     return np.array(result)
 
 # Function to convert the entire video
 def video_to_anime(video_path, prompt="Arcane style"):
-    # Load the video and extract frames
-    clip = VideoFileClip(video_path)
-    frames = [frame for frame in clip.iter_frames()]
+    # Read the input video using OpenCV
+    cap = cv2.VideoCapture(video_path)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    frames = []
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frames.append(frame)
+    cap.release()
 
-    # Process each frame with the anime-style model
+    # Process each frame to anime style
    processed_frames = [process_frame(frame, prompt) for frame in frames]
 
-    # Reassemble the processed frames into a video
-    new_clip = ImageSequenceClip(processed_frames, fps=clip.fps)
+    # Write the output video using OpenCV
+    height, width, _ = processed_frames[0].shape
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for MP4
     output_path = "output.mp4"
-    new_clip.write_videofile(output_path, codec="libx264")
+    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+    for frame in processed_frames:
+        # Convert back to BGR for OpenCV
+        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+        out.write(frame_bgr)
+    out.release()
 
     return output_path
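A caveat on the committed process_frame: diffusers.utils.load_image accepts a file path, URL, or PIL.Image, not a raw NumPy array, so load_image(frame_rgb) would raise a ValueError at inference time. A minimal sketch of a working variant that wraps the frame with PIL.Image.fromarray instead; the pipeline setup mirrors the commit (the closing lines are inferred from the hunk context pipe.to("cuda")), while the PIL import is an addition not present in it:

import cv2
import numpy as np
from PIL import Image
from diffusers import AutoPipelineForImage2Image

# Pipeline as loaded in app.py
pipe = AutoPipelineForImage2Image.from_pretrained(
    "nitrosocke/Arcane-Diffusion",
    safety_checker=None,
)
pipe.to("cuda")

def process_frame(frame, prompt):
    # Convert frame from BGR (OpenCV format) to RGB (expected by the model)
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # load_image() only takes paths, URLs, or PIL images, so wrap the raw
    # array in a PIL image rather than calling load_image(frame_rgb)
    image = Image.fromarray(frame_rgb)
    # Apply the anime-style transformation
    result = pipe(prompt=prompt, image=image, strength=0.75).images[0]
    # Convert back to a NumPy array for the video writer
    return np.array(result)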
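Also worth noting: video_to_anime buffers every decoded frame, and then every stylized frame, in memory before writing, which scales poorly with clip length. A sketch of a streaming variant under the same OpenCV calls, writing each frame as soon as it is stylized; the name video_to_anime_streaming and the resize-back step are illustrative additions, not part of this commit:

import cv2

def video_to_anime_streaming(video_path, prompt="Arcane style"):
    # Read the input video using OpenCV
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Open the writer up front so frames can be emitted one at a time
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for MP4
    output_path = "output.mp4"
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Stylize and write immediately instead of buffering the whole clip;
        # process_frame is the function defined in app.py (returns RGB)
        styled = process_frame(frame, prompt)
        # The diffusion pipeline may round output sizes to a multiple of 8,
        # so resize back to the writer's dimensions before encoding
        styled = cv2.resize(styled, (width, height))
        out.write(cv2.cvtColor(styled, cv2.COLOR_RGB2BGR))

    cap.release()
    out.release()
    return output_path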
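Finally, the diff never shows how video_to_anime is exposed, though the gradio import suggests a UI is launched elsewhere in app.py. A hypothetical wiring, assuming a standard gr.Interface with video in and out; none of these component choices or labels appear in the commit:

import gradio as gr

# Hypothetical interface; app.py's actual wiring is not shown in this diff
demo = gr.Interface(
    fn=video_to_anime,  # the function defined in app.py
    inputs=[
        gr.Video(label="Input video"),
        gr.Textbox(value="Arcane style", label="Prompt"),
    ],
    outputs=gr.Video(label="Anime-style output"),
)

if __name__ == "__main__":
    demo.launch()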