	Update app.py
app.py CHANGED
@@ -27,6 +27,16 @@ base_model = "John6666/hyper-flux1-dev-fp8-flux"
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=good_vae).to(device)
+
+model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
+config = CLIPConfig.from_pretrained(model_id)
+config.text_config.max_position_embeddings = 248
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
+pipe.tokenizer = clip_processor.tokenizer
+pipe.text_encoder = clip_model.text_model
+pipe.tokenizer_max_length = 248
+pipe.text_encoder.dtype = torch.bfloat16
 
 MAX_SEED = 2**32-1
 
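What the commit does: it swaps the FLUX pipeline's stock CLIP tokenizer and text encoder for zer0int/LongCLIP-GmP-ViT-L-14, widening max_position_embeddings from CLIP's usual 77 to 248 so long prompts are encoded whole instead of being truncated. Below is a minimal self-contained sketch of the same swap in context; it is an illustration, not the Space's exact app.py. The dtype/device setup, the explicit .to(device) move on the encoder, and the example prompt and step count are assumptions filled in to make the sketch runnable.

import torch
from diffusers import AutoencoderKL, DiffusionPipeline
from transformers import CLIPConfig, CLIPModel, CLIPProcessor

# Assumed setup; in the Space these are defined earlier in app.py, outside the hunk.
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "John6666/hyper-flux1-dev-fp8-flux"

# Context lines of the hunk: full-quality VAE plus the base pipeline.
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype
).to(device)
pipe = DiffusionPipeline.from_pretrained(
    base_model, torch_dtype=dtype, vae=good_vae
).to(device)

# The added lines: load LongCLIP with the position-embedding table widened to
# 248; ignore_mismatched_sizes lets from_pretrained accept the size override.
model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
config = CLIPConfig.from_pretrained(model_id)
config.text_config.max_position_embeddings = 248
clip_model = CLIPModel.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True
)
clip_processor = CLIPProcessor.from_pretrained(
    model_id, padding="max_length", max_length=248
)

# Install the long tokenizer/encoder on the pipeline. The .to(device) move is
# an assumption here; the hunk itself leaves device placement to the app.
pipe.tokenizer = clip_processor.tokenizer
pipe.text_encoder = clip_model.text_model.to(device)
pipe.tokenizer_max_length = 248
# CLIPTextTransformer is a bare nn.Module with no .dtype attribute of its own,
# and the FLUX pipeline reads text_encoder.dtype while encoding prompts, so the
# commit sets it explicitly.
pipe.text_encoder.dtype = torch.bfloat16

# Hypothetical usage: a prompt well past CLIP's default 77-token limit now fits.
long_prompt = "an extremely detailed scene description ..."  # imagine ~200 tokens
image = pipe(long_prompt, num_inference_steps=8).images[0]
image.save("out.png")

Note that the hunk touches only pipe.tokenizer and pipe.text_encoder, i.e. the CLIP/pooled-embedding path; FLUX's second text encoder (the T5 side, pipe.text_encoder_2) is left as-is.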
