1inkusFace committed on
Commit
13ddf33
·
verified ·
1 Parent(s): 125e8b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -15,7 +15,7 @@ from diffusers import HunyuanVideoTransformer3DModel
15
  from diffusers.utils import export_to_video
16
  from diffusers.utils import load_image
17
  from PIL import Image
18
-
19
  from torchao.quantization import float8_weight_only
20
  from torchao.quantization import quantize_
21
  from transformers import LlamaModel
@@ -80,13 +80,14 @@ negative_prompt = "Aerial view, aerial view, overexposed, low quality, deformati
80
 
81
  @spaces.GPU(duration=60)
82
  def generate(segment, image, prompt, size, guidance_scale, num_inference_steps, frames, seed, progress=gr.Progress(track_tqdm=True) ):
83
- Offload.offload(
84
- pipeline=pipe,
85
- config=offload_config,
86
- )
87
  random.seed(time.time())
88
  seed = int(random.randrange(4294967294))
89
  if segment==1:
 
 
 
 
90
  prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_attention_mask, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
91
  prompt=prompt, do_classifier_free_guidance=True, negative_prompt=negative_prompt, device=device
92
  )
 
15
  from diffusers.utils import export_to_video
16
  from diffusers.utils import load_image
17
  from PIL import Image
18
+ import numpy as np
19
  from torchao.quantization import float8_weight_only
20
  from torchao.quantization import quantize_
21
  from transformers import LlamaModel
 
80
 
81
  @spaces.GPU(duration=60)
82
  def generate(segment, image, prompt, size, guidance_scale, num_inference_steps, frames, seed, progress=gr.Progress(track_tqdm=True) ):
83
+
 
 
 
84
  random.seed(time.time())
85
  seed = int(random.randrange(4294967294))
86
  if segment==1:
87
+ Offload.offload(
88
+ pipeline=pipe,
89
+ config=offload_config,
90
+ )
91
  prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_attention_mask, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
92
  prompt=prompt, do_classifier_free_guidance=True, negative_prompt=negative_prompt, device=device
93
  )