Update simple_app.py
simple_app.py (CHANGED, +23 -16)
@@ -15,7 +15,7 @@ snapshot_download(
     repo_id="Wan-AI/Wan2.1-T2V-1.3B",
     local_dir="./Wan2.1-T2V-1.3B"
 )
-print("Model downloaded successfully.")
+print("✅ Model downloaded successfully.")

 def infer(prompt, progress=gr.Progress(track_tqdm=True)):
     total_process_steps = 11
@@ -34,8 +34,9 @@ def infer(prompt, progress=gr.Progress(track_tqdm=True)):
     sub_tick_total = 1500
     video_phase = False

+    # ✅ Use generate.py directly
     command = [
-        "python", "-u", "
+        "python", "-u", "generate.py",  # <- Make sure generate.py is in the same folder
         "--task", "t2v-1.3B",
         "--size", "480*480",
         "--ckpt_dir", "./Wan2.1-T2V-1.3B",
@@ -47,7 +48,7 @@ def infer(prompt, progress=gr.Progress(track_tqdm=True)):
         "--save_file", "generated_video.mp4"
     ]

-    print("Starting video generation process...")
+    print("🚀 Starting video generation process...")
     process = subprocess.Popen(
         command,
         stdout=subprocess.PIPE,
@@ -60,9 +61,12 @@ def infer(prompt, progress=gr.Progress(track_tqdm=True)):

     for line in iter(stdout.readline, ''):
         stripped_line = line.strip()
+        print(f"[SUBPROCESS]: {stripped_line}")  # Debug print
+
         if not stripped_line:
             continue

+        # Match video generation progress (like tqdm)
         progress_match = progress_pattern.search(stripped_line)
         if progress_match:
             if sub_bar is not None and sub_ticks < sub_tick_total:
@@ -89,7 +93,6 @@ def infer(prompt, progress=gr.Progress(track_tqdm=True)):
             parts = stripped_line.split("INFO:", 1)
             msg = parts[1].strip() if len(parts) > 1 else ""
             print(f"[INFO]: {msg}")
-
             if processed_steps < irrelevant_steps:
                 processed_steps += 1
                 continue
@@ -104,31 +107,35 @@ def infer(prompt, progress=gr.Progress(track_tqdm=True)):
                                ncols=120, dynamic_ncols=False, leave=True)
             sub_ticks = 0
             continue
-        else:
-            print(stripped_line)

     process.wait()

+    # Final cleanup
     if video_progress_bar is not None:
         video_progress_bar.close()
     if sub_bar is not None:
         sub_bar.close()
     overall_bar.close()

-
-
-
+    # ✅ Output validation
+    if process.returncode == 0:
+        if os.path.exists("generated_video.mp4"):
+            print("✅ Video generation completed successfully.")
+            return "generated_video.mp4"
+        else:
+            print("❌ Video generation finished but output file is missing.")
+            raise gr.Error("Output video not found after generation.")
     else:
-        print("
-        raise gr.Error("Video generation failed. Check logs
+        print("❌ Subprocess failed.")
+        raise gr.Error("Video generation failed. Check logs above.")

-# Gradio UI
+# ✅ Gradio UI
 with gr.Blocks() as demo:
     with gr.Column():
-        gr.Markdown("# Wan 2.1 1.3B")
-        gr.Markdown("
-        prompt = gr.Textbox(label="
-        submit_btn = gr.Button("
+        gr.Markdown("# Wan 2.1 1.3B - Text to Video")
+        gr.Markdown("Generate short videos from prompts. Duplicate this space to avoid queue limits.")
+        prompt = gr.Textbox(label="Enter your prompt")
+        submit_btn = gr.Button("Generate Video")
         video_res = gr.Video(label="Generated Video")

     submit_btn.click(
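Note: the hunks above call progress_pattern.search(stripped_line), but the regex itself is defined earlier in simple_app.py and is not part of this diff. As a rough sketch only, a tqdm-style progress line could be parsed with something like the hypothetical pattern and helper below (the regex, parse_progress, and the sample lines are assumptions, not code from this commit):

import re

# Hypothetical stand-in for the progress_pattern defined elsewhere in simple_app.py.
# It targets tqdm-style lines such as "42%|████      | 21/50 [00:30<00:41,  1.40s/it]".
progress_pattern = re.compile(r"(\d+)%\|.*\|\s*(\d+)/(\d+)")

def parse_progress(line):
    """Return (current_step, total_steps) for a tqdm-style line, or None otherwise."""
    match = progress_pattern.search(line)
    if match:
        return int(match.group(2)), int(match.group(3))
    return None

print(parse_progress("42%|████      | 21/50 [00:30<00:41,  1.40s/it]"))  # (21, 50)
print(parse_progress("INFO: loading checkpoint"))                        # None

The "-u" flag passed to the subprocess keeps its Python output unbuffered, so progress lines reach the iter(stdout.readline, '') loop as they are produced; the Popen call presumably also opens the pipe in text mode, so readline() yields strings and the '' sentinel terminates the loop.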