	Update app.py
app.py CHANGED
@@ -157,7 +157,7 @@ def generate(segment, image, prompt, size, guidance_scale, num_inference_steps,
                 latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=1)
                 timestep = t.expand(latents.shape[0]).to(latents.dtype)
                 with torch.no_grad():
-                    noise_pred =
+                    noise_pred = pipe.transformer(
                         hidden_states=latent_model_input,
                         timestep=timestep,
                         encoder_hidden_states=prompt_embeds,
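This hunk routes the per-step noise prediction through the module-level `pipe` object inside `torch.no_grad()`. For orientation, here is a minimal sketch of how one such denoising step fits together, assuming `pipe`, `latents`, `latent_image_input`, `prompt_embeds`, and `timesteps` are prepared earlier in `generate()`; the loop scaffolding and the `return_dict=False` close of the call are assumptions, and the real file passes more transformer kwargs after `encoder_hidden_states` (the hunk is cut off there):

import torch

for t in timesteps:  # assumed: the scheduler's timestep sequence for this segment
    latent_model_input = latents  # assumed; any CFG batching is omitted
    # Condition the denoiser on the image latents via channel concatenation.
    latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=1)
    # Broadcast the scalar timestep across the batch, matching the latents' dtype.
    timestep = t.expand(latents.shape[0]).to(latents.dtype)
    with torch.no_grad():  # inference only, no autograd graph
        noise_pred = pipe.transformer(
            hidden_states=latent_model_input,
            timestep=timestep,
            encoder_hidden_states=prompt_embeds,
            # ...the actual call continues with more kwargs here
            return_dict=False,  # assumed close of the call
        )[0]
    # Advance the latents by one scheduler step (the file reaches this via self.scheduler).
    latents = pipe.scheduler.step(noise_pred, t, latents, return_dict=False)[0]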
@@ -173,9 +173,9 @@ def generate(segment, image, prompt, size, guidance_scale, num_inference_steps,
                 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
     intermediate_latents_cpu = latents.detach().cpu()
     if segment==8:
-        latents = latents.to(self.vae.dtype) /
-        video =
-        video =
+        latents = latents.to(self.vae.dtype) / pipe.vae.config.scaling_factor
+        video = pipe.vae.decode(latents, return_dict=False)[0]
+        video = pipe.video_processor.postprocess_video(video, output_type=output_type)
         # return HunyuanVideoPipelineOutput(frames=video)
         save_dir = f"./"
         video_out_file = f"{save_dir}/{seed}.mp4"
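The `+` lines here decode the final segment: unscale the latents by the VAE's `scaling_factor`, decode them into a pixel tensor, and postprocess that tensor into frames. Note the new line 176 still reads the dtype via `self.vae` while resolving the scaling factor, decoder, and video processor through `pipe`. A standalone sketch of this tail, assuming `latents`, `output_type`, and `pipe` carry over from the loop above:

# Runs once, after the last denoising segment (segment == 8 in this app).
latents = latents.to(pipe.vae.dtype) / pipe.vae.config.scaling_factor  # undo latent scaling
video = pipe.vae.decode(latents, return_dict=False)[0]                 # latents -> pixel tensor
video = pipe.video_processor.postprocess_video(video, output_type=output_type)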
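The unchanged context then builds the output path from the seed; the code that actually writes the file sits below the visible hunk. A common way to write such postprocessed frames is the `export_to_video` helper from diffusers; the snippet below is an illustration of that helper under assumed names, not necessarily what the hidden lines of this app do:

from diffusers.utils import export_to_video

save_dir = f"./"                           # as in the diff context above
video_out_file = f"{save_dir}/{seed}.mp4"  # `seed` is assumed to come from generate()
# `video` is the batch returned by postprocess_video; write the first sample.
# The fps value is a placeholder, not taken from this commit.
export_to_video(video[0], video_out_file, fps=24)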
