Spaces:
				
			
			
	
			
			
		Sleeping
		
	
	
	
			
			
	
	
	
	
		
		
		Sleeping
		
	Create app.py
Browse files
    	
        app.py
    ADDED
    
    | @@ -0,0 +1,23 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
# --- Module-level setup: imports and one-time model loading ---
import gradio as gr
import torch
from PIL import Image
from transformers import ColPaliForRetrieval, ColPaliProcessor

# ColPali v1.3 checkpoint in its transformers-native ("-hf") packaging.
model_name = "vidore/colpali-v1.3-hf"

# The processor turns PIL images into model-ready tensors; the model is
# loaded in float32 and put in eval mode since this app only does inference.
processor = ColPaliProcessor.from_pretrained(model_name)
model = ColPaliForRetrieval.from_pretrained(
    model_name,
    torch_dtype=torch.float32,
).eval()
def process_image(image):
    """Embed an image with ColPali and return the embedding as nested lists.

    Parameters
    ----------
    image : PIL.Image.Image
        The image supplied by the Gradio UI (``type="pil"``).

    Returns
    -------
    list
        The model's embedding tensor, squeezed and converted to plain
        Python lists so Gradio can serialize it as JSON.
    """
    batch = processor(images=image, return_tensors="pt")
    # Inference only — no gradients needed.
    with torch.no_grad():
        result = model(**batch)
    return result.embeddings.squeeze().tolist()
# Gradio UI: upload an image, receive its ColPali embedding back as JSON.
demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs="json",
    examples=[["example1.jpg"], ["example2.jpg"]]
)

# Guard the launch behind the standard entry-point check so importing this
# module (e.g. from a test or another app) does not start a web server;
# running the script directly behaves exactly as before.
if __name__ == "__main__":
    demo.launch()