Maximofn committed on
Commit
0edac6b
·
1 Parent(s): 325c781

fix(src): :rocket: Merge all into one function

Browse files
Files changed (1) hide show
  1. app.py +14 -23
app.py CHANGED
@@ -9,8 +9,19 @@ from datetime import datetime
9
  import gradio as gr
10
 
11
  @spaces.GPU
12
- def initialize_model(model):
13
- # quant_config = BitsAndBytesConfig(load_in_8bit=True)
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  transformer_8bit = HunyuanVideoTransformer3DModel.from_pretrained(
16
  model,
@@ -28,24 +39,6 @@ def initialize_model(model):
28
  device_map="balanced",
29
  )
30
 
31
- return pipeline
32
-
33
- @spaces.GPU
34
- def generate_video(
35
- pipeline,
36
- prompt,
37
- resolution,
38
- video_length,
39
- seed,
40
- num_inference_steps,
41
- guidance_scale,
42
- flow_shift,
43
- embedded_guidance_scale
44
- ):
45
- seed = None if seed == -1 else seed
46
- width, height = resolution.split("x")
47
- width, height = int(width), int(height)
48
-
49
  # Generar el video usando el pipeline
50
  video = pipeline(
51
  prompt=prompt,
@@ -70,8 +63,6 @@ def generate_video(
70
  return video_path
71
 
72
  def create_demo(model):
73
- pipeline = initialize_model(model)
74
-
75
  with gr.Blocks() as demo:
76
  gr.Markdown("# Hunyuan Video Generation")
77
 
@@ -120,7 +111,7 @@ def create_demo(model):
120
  output = gr.Video(label="Generated Video")
121
 
122
  generate_btn.click(
123
- fn=lambda *inputs: generate_video(pipeline, *inputs),
124
  inputs=[
125
  prompt,
126
  resolution,
 
9
  import gradio as gr
10
 
11
  @spaces.GPU
12
+ def generate_video(
13
+ prompt,
14
+ resolution,
15
+ video_length,
16
+ seed,
17
+ num_inference_steps,
18
+ guidance_scale,
19
+ flow_shift,
20
+ embedded_guidance_scale
21
+ ):
22
+ seed = None if seed == -1 else seed
23
+ width, height = resolution.split("x")
24
+ width, height = int(width), int(height)
25
 
26
  transformer_8bit = HunyuanVideoTransformer3DModel.from_pretrained(
27
  model,
 
39
  device_map="balanced",
40
  )
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  # Generar el video usando el pipeline
43
  video = pipeline(
44
  prompt=prompt,
 
63
  return video_path
64
 
65
  def create_demo(model):
 
 
66
  with gr.Blocks() as demo:
67
  gr.Markdown("# Hunyuan Video Generation")
68
 
 
111
  output = gr.Video(label="Generated Video")
112
 
113
  generate_btn.click(
114
+ fn=lambda *inputs: generate_video(*inputs),
115
  inputs=[
116
  prompt,
117
  resolution,