SpiderReddy committed (verified)
Commit 47f95ed · Parent(s): 8016ac3

Update app.py

Files changed (1)
  1. app.py +36 -30
app.py CHANGED
@@ -10,20 +10,18 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(

 class AnimeGANv3:
     def __init__(self):
-        # Ensure directories exist
         os.makedirs('output', exist_ok=True)
         os.makedirs('frames', exist_ok=True)
+        logging.info(f"Available ONNX Runtime providers: {ort.get_available_providers()}")

     def process_frame(self, frame, style_code, det_face):
-        """Process a single frame with AnimeGANv3."""
         frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         output = AnimeGANv3_src.Convert(frame_rgb, style_code, det_face)
-        return output[:, :, ::-1]  # Convert back to BGR for OpenCV
+        return output[:, :, ::-1]

     def inference(self, video_path, style, if_face=None):
         logging.info(f"Starting inference: video={video_path}, style={style}, face_detection={if_face}")
         try:
-            # Map style names to codes
             style_codes = {
                 "AnimeGANv3_Arcane": "A",
                 "AnimeGANv3_Trump v1.0": "T",
@@ -37,40 +35,49 @@ class AnimeGANv3:
             style_code = style_codes.get(style, "U")
             det_face = if_face == "Yes"

-            # Open the input video and extract frames
+            # Open video
             cap = cv2.VideoCapture(video_path)
             if not cap.isOpened():
                 raise Exception("Could not open video file")

             fps = cap.get(cv2.CAP_PROP_FPS)
             frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-            frames = []
-
-            while cap.isOpened():
-                ret, frame = cap.read()
-                if not ret:
-                    break
-                frames.append(frame)
-
-            cap.release()
             logging.info(f"Extracted {frame_count} frames at {fps} FPS to process")

-            # Process each frame and save as PNG with logging
-            for idx, frame in enumerate(frames):
-                stylized_frame = self.process_frame(frame, style_code, det_face)
-                png_filename = f'frames/frame_{idx:04d}.png'
-                cv2.imwrite(png_filename, stylized_frame)
-                logging.info(f"Processed and saved frame {idx + 1}/{frame_count} as {png_filename}")
+            # Process in batches
+            batch_size = 50  # Adjust based on testing (e.g., 50 frames per batch)
+            save_path = "output/out.mp4"
+            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+            out = None  # Video writer initialized later

-            logging.info("All frames processed and saved as PNGs")
+            frame_idx = 0
+            while cap.isOpened():
+                batch_frames = []
+                for _ in range(batch_size):
+                    ret, frame = cap.read()
+                    if not ret:
+                        break
+                    batch_frames.append(frame)
+                    frame_idx += 1
+
+                if not batch_frames:
+                    break

-            # Combine PNGs into video using ffmpeg
-            save_path = "output/out.mp4"
-            os.system(f"ffmpeg -framerate {fps} -i frames/frame_%04d.png -c:v libx264 -pix_fmt yuv420p {save_path} -y")
+                # Process batch
+                for idx, frame in enumerate(batch_frames):
+                    stylized_frame = self.process_frame(frame, style_code, det_face)
+                    if out is None:  # Initialize writer on first frame
+                        out = cv2.VideoWriter(save_path, fourcc, fps, (width, height))
+                    out.write(stylized_frame)
+                    logging.info(f"Processed frame {frame_idx - len(batch_frames) + idx + 1}/{frame_count}")

-            # Check if the video was created
-            if not os.path.exists(save_path):
-                raise Exception("Failed to create output video with ffmpeg")
+            cap.release()
+            if out:
+                out.release()
+            else:
+                raise Exception("No frames processed")

             logging.info(f"Video created: {save_path}")
             return save_path
@@ -78,10 +85,10 @@ class AnimeGANv3:
             logging.error(f"Error: {str(error)}")
             return None

-# Create an instance of the AnimeGANv3 class
+# Create an instance
 anime_gan = AnimeGANv3()

-# Define the Gradio interface
+# Gradio interface
 title = "AnimeGANv3: Video to Anime Converter"
 description = r"""Upload a video to convert it into anime style using AnimeGANv3.<br>
 Select a style and choose whether to optimize for faces.<br>
@@ -112,5 +119,4 @@ iface = gr.Interface(
     allow_flagging="never"
 )

-# Launch the interface
 iface.launch()
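
For context, a minimal standalone sketch of the pattern this commit switches to: read frames in batches and stream stylized output straight into cv2.VideoWriter instead of dumping PNGs and shelling out to ffmpeg. The stylize() helper is a hypothetical stand-in for AnimeGANv3_src.Convert, and the sketch assumes stylized frames keep the source resolution, which cv2.VideoWriter requires.

import cv2

def stylize(frame_bgr):
    # Hypothetical stand-in for AnimeGANv3_src.Convert: the real app converts
    # BGR -> RGB, runs the ONNX model, and flips the result back to BGR.
    return frame_bgr

def convert_video(src_path, dst_path="output/out.mp4", batch_size=50):
    cap = cv2.VideoCapture(src_path)
    if not cap.isOpened():
        raise RuntimeError("Could not open video file")

    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    writer = cv2.VideoWriter(dst_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, size)

    while True:
        batch = []
        for _ in range(batch_size):       # read at most batch_size frames
            ok, frame = cap.read()
            if not ok:
                break
            batch.append(frame)
        if not batch:                     # video exhausted
            break
        for frame in batch:
            writer.write(stylize(frame))  # frame must match `size`

    cap.release()
    writer.release()
    return dst_path

Streaming into the writer keeps at most one batch of frames in memory and removes the intermediate PNG files and the ffmpeg shell-out used before this commit.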