Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -9,22 +9,17 @@ pipe = AutoPipelineForImage2Image.from_pretrained(
|
|
9 |
"nitrosocke/Arcane-Diffusion",
|
10 |
safety_checker=None,
|
11 |
)
|
12 |
-
|
13 |
|
14 |
# Stylize one frame with the diffusion pipeline.
def process_frame(frame, prompt):
    """Run a single video frame through the img2img pipeline.

    Args:
        frame: one frame in OpenCV's BGR channel order.
        prompt: text prompt guiding the style transfer.

    Returns:
        The stylized frame as a numpy array (RGB channel order, as
        produced by the pipeline output image).
    """
    # The model expects RGB, while OpenCV hands us BGR.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # NOTE(review): diffusers' load_image documents str / PIL.Image inputs —
    # confirm it actually accepts a raw numpy array here.
    model_input = load_image(rgb_frame)
    # strength=0.75 keeps some of the source frame while applying the style.
    styled = pipe(prompt=prompt, image=model_input, strength=0.75).images[0]
    return np.array(styled)
|
24 |
|
25 |
# Function to convert the entire video
|
26 |
def video_to_anime(video_path, prompt="Arcane style"):
|
27 |
-
# Read the input video using OpenCV
|
28 |
cap = cv2.VideoCapture(video_path)
|
29 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
30 |
frames = []
|
@@ -35,16 +30,15 @@ def video_to_anime(video_path, prompt="Arcane style"):
|
|
35 |
frames.append(frame)
|
36 |
cap.release()
|
37 |
|
38 |
-
# Process each frame
|
39 |
processed_frames = [process_frame(frame, prompt) for frame in frames]
|
40 |
|
41 |
-
# Write the output video
|
42 |
height, width, _ = processed_frames[0].shape
|
43 |
-
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
44 |
output_path = "output.mp4"
|
45 |
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
|
46 |
for frame in processed_frames:
|
47 |
-
# Convert back to BGR for OpenCV
|
48 |
frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
|
49 |
out.write(frame_bgr)
|
50 |
out.release()
|
|
|
9 |
"nitrosocke/Arcane-Diffusion",
|
10 |
safety_checker=None,
|
11 |
)
|
12 |
+
# Do NOT move to cuda; let it run on CPU by default
|
13 |
|
14 |
# Function to process a single frame
|
15 |
def process_frame(frame, prompt):
|
|
|
16 |
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
|
|
17 |
image = load_image(frame_rgb)
|
|
|
18 |
result = pipe(prompt=prompt, image=image, strength=0.75).images[0]
|
|
|
19 |
return np.array(result)
|
20 |
|
21 |
# Function to convert the entire video
|
22 |
def video_to_anime(video_path, prompt="Arcane style"):
|
|
|
23 |
cap = cv2.VideoCapture(video_path)
|
24 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
25 |
frames = []
|
|
|
30 |
frames.append(frame)
|
31 |
cap.release()
|
32 |
|
33 |
+
# Process each frame
|
34 |
processed_frames = [process_frame(frame, prompt) for frame in frames]
|
35 |
|
36 |
+
# Write the output video
|
37 |
height, width, _ = processed_frames[0].shape
|
38 |
+
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
39 |
output_path = "output.mp4"
|
40 |
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
|
41 |
for frame in processed_frames:
|
|
|
42 |
frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
|
43 |
out.write(frame_bgr)
|
44 |
out.release()
|