File size: 1,418 Bytes
2daf177
ad33b28
2daf177
 
 
 
 
 
 
 
540ec9f
2daf177
 
04de766
2daf177
 
ad33b28
 
 
 
 
b7c0c56
 
 
 
dcc7827
b7c0c56
2daf177
04de766
 
 
dc729f5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import gradio as gr
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, BitsAndBytesConfig
from peft import PeftModel
import spaces
import torch
from huggingface_hub.hf_api import HfFolder
import os
# --- One-time startup: Hub auth + model/processor load -----------------------
# Read the Hub access token from the Space's environment and persist it so
# that `from_pretrained` can fetch gated weights (PaliGemma requires auth).
# NOTE(review): if the 'token' env var is unset this is None — verify the
# Space secret is configured, otherwise save_token/from_pretrained will fail.
token = os.getenv('token')
HfFolder.save_token(token)

# Single source of truth for the target device; reused below and in greet()
# instead of repeating the "cuda" literal.
device = "cuda"

# Load the fine-tuned (LoRA) PaliGemma checkpoint and the matching base
# processor once at import time so each request only runs inference.
model = PaliGemmaForConditionalGeneration.from_pretrained("triphuong57/paligemma_lora").to(device)
processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")

@spaces.GPU(duration=180)
def greet(image, prompt):
    """Run PaliGemma inference on one image/prompt pair.

    Args:
        image: PIL image from the Gradio input component.
        prompt: Free-form text prompt (e.g. a caption request or question).

    Returns:
        The generated answer text, with the echoed prompt tokens and
        special tokens stripped.
    """
    # BUG FIX: the processor returns CPU tensors while the model lives on
    # CUDA; move the whole batch to `device` or generate() raises a
    # device-mismatch error.
    model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
    # Prompt length, used to slice the echoed prompt off the output sequence.
    input_len = model_inputs["input_ids"].shape[-1]
    with torch.inference_mode():
        # Greedy decoding (do_sample=False) for deterministic answers.
        generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
        # Decode only the newly generated tokens.
        decoded = processor.decode(generation[0][input_len:], skip_special_tokens=True)
    return decoded

# --- Gradio UI: image + text prompt in, generated text out -------------------
title = "Demo BTL nhΓ³m 8"
description = "Made by Nguyα»…n QuΓ½ Đang, Đỗ Minh NhαΊ­t, VΕ© VΓ’n Long"

# Accept a PIL image from either a file upload or the webcam.
image_input = gr.Image(label="Upload image", sources=['upload', 'webcam'], type="pil")
prompt_input = gr.Text()

demo = gr.Interface(
    fn=greet,
    inputs=[image_input, prompt_input],
    outputs="text",
    title=title,
    description=description,
)
demo.launch(share=True)