SpiderReddy committed
Commit aa91c42 · verified · 1 Parent(s): 2afe6e5

Update app.py

Files changed (1)
  1. app.py +93 -55
app.py CHANGED
@@ -1,63 +1,101 @@
-import gradio as gr
 import cv2
 import numpy as np
-from diffusers import AutoPipelineForImage2Image
-from diffusers.utils import load_image
-from PIL import Image  # Add this import
-
-# Load the anime-style diffusion model
-pipe = AutoPipelineForImage2Image.from_pretrained(
-    "nitrosocke/Arcane-Diffusion",
-    safety_checker=None,
-)
-# Running on CPU by default (no .to("cuda"))
-
-# Function to process a single frame
-def process_frame(frame, prompt):
-    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
-    pil_image = Image.fromarray(frame_rgb)  # Convert NumPy array to PIL image
-    image = load_image(pil_image)  # Pass PIL image to load_image
-    result = pipe(prompt=prompt, image=image, strength=0.75).images[0]
-    return np.array(result)
-
-# Function to convert the entire video
-def video_to_anime(video_path, prompt="Arcane style"):
-    cap = cv2.VideoCapture(video_path)
-    fps = cap.get(cv2.CAP_PROP_FPS)
-    frames = []
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-        frames.append(frame)
-    cap.release()
-
-    # Process each frame
-    processed_frames = [process_frame(frame, prompt) for frame in frames]
-
-    # Write the output video
-    height, width, _ = processed_frames[0].shape
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    output_path = "output.mp4"
-    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
-    for frame in processed_frames:
-        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-        out.write(frame_bgr)
-    out.release()
-
-    return output_path
-
-# Create the Gradio interface
 iface = gr.Interface(
-    fn=video_to_anime,
     inputs=[
         gr.Video(label="Input Video"),
-        gr.Textbox(label="Style Prompt", value="Arcane style")
     ],
-    outputs=gr.Video(label="Output Video"),
-    title="Video to Anime Converter",
-    description="Upload a video and convert it to anime style!"
 )

-# Launch the interface with a public link
-iface.launch(share=True)  # Added share=True as per the suggestion
+import os
 import cv2
+import gradio as gr
+import AnimeGANv3_src
 import numpy as np
+from moviepy.editor import VideoFileClip, ImageSequenceClip
+
+class AnimeGANv3:
+    def __init__(self):
+        # Ensure the output directory exists
+        os.makedirs('output', exist_ok=True)
+
+    def process_frame(self, frame, style_code, det_face):
+        """Process a single frame with AnimeGANv3."""
+        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        output = AnimeGANv3_src.Convert(frame_rgb, style_code, det_face)
+        return output
+
+    def inference(self, video_path, style, if_face=None):
+        print(video_path, style, if_face)
+        try:
+            # Map style names to codes
+            style_codes = {
+                "AnimeGANv3_Arcane": "A",
+                "AnimeGANv3_Trump v1.0": "T",
+                "AnimeGANv3_Shinkai": "S",
+                "AnimeGANv3_PortraitSketch": "P",
+                "AnimeGANv3_Hayao": "H",
+                "AnimeGANv3_Disney v1.0": "D",
+                "AnimeGANv3_JP_face v1.0": "J",
+                "AnimeGANv3_Kpop v2.0": "K",
+            }
+            style_code = style_codes.get(style, "U")
+            det_face = if_face == "Yes"
+
+            # Open the video
+            cap = cv2.VideoCapture(video_path)
+            if not cap.isOpened():
+                raise Exception("Could not open video file")
+
+            # Get video properties
+            fps = cap.get(cv2.CAP_PROP_FPS)
+            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            print(f"Processing {frame_count} frames at {fps} FPS")
+
+            # Extract and process frames
+            frames = []
+            while True:
+                ret, frame = cap.read()
+                if not ret:
+                    break
+                processed_frame = self.process_frame(frame, style_code, det_face)
+                frames.append(processed_frame)
+            cap.release()
+
+            # Convert frames to video
+            save_path = "output/out.mp4"
+            clip = ImageSequenceClip(frames, fps=fps)
+            clip.write_videofile(save_path, codec="libx264", audio=False)
+
+            return save_path
+        except Exception as error:
+            print('Error:', error)
+            return None
+
+# Create an instance of the AnimeGANv3 class
+anime_gan = AnimeGANv3()
+
+# Define the Gradio interface
+title = "AnimeGANv3: Video to Anime Converter"
+description = r"""Upload a video to convert it into anime style using AnimeGANv3.<br>
+Select a style and choose whether to optimize for faces.<br>
+<a href='https://github.com/TachibanaYoshino/AnimeGANv3' target='_blank'><b>AnimeGANv3 GitHub</b></a> |
+<a href='https://www.patreon.com/Asher_Chan' target='_blank'><b>Patreon</b></a>"""
+
 iface = gr.Interface(
+    fn=anime_gan.inference,
     inputs=[
         gr.Video(label="Input Video"),
+        gr.Dropdown(choices=[
+            'AnimeGANv3_Hayao',
+            'AnimeGANv3_Shinkai',
+            'AnimeGANv3_Arcane',
+            'AnimeGANv3_Trump v1.0',
+            'AnimeGANv3_Disney v1.0',
+            'AnimeGANv3_PortraitSketch',
+            'AnimeGANv3_JP_face v1.0',
+            'AnimeGANv3_Kpop v2.0',
+        ], label='AnimeGANv3 Style', default='AnimeGANv3_Arcane'),
+        gr.Radio(choices=["Yes", "No"], label='Extract face', default="No"),
+    ],
+    outputs=[
+        gr.Video(label="Output Video")
     ],
+    title=title,
+    description=description,
+    allow_flagging="never"
 )

+# Launch the interface
+iface.launch()
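
For reference, a minimal way to exercise the new pipeline outside the Gradio UI could look like the sketch below. It is only an illustration, not part of the commit: it assumes the AnimeGANv3 class from the updated app.py can be imported without launching the interface (for example, with the trailing iface.launch() guarded by if __name__ == "__main__":), that AnimeGANv3_src and its model weights are installed, and that a short local clip named sample.mp4 exists.

# Hypothetical smoke test for the new AnimeGANv3-based pipeline (not part of the commit).
from app import AnimeGANv3  # assumes iface.launch() is guarded so the import has no side effects

converter = AnimeGANv3()
result = converter.inference(
    "sample.mp4",        # assumed local input clip
    "AnimeGANv3_Hayao",  # any of the dropdown style names
    if_face="No",        # "Yes" enables the face-oriented conversion path
)
print("Stylized clip:", result)  # "output/out.mp4" on success, None on error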