import gradio as gr
from PIL import Image
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Use the GPU when available, otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Initialize the model and tokenizer. device_map="auto" already places the
# weights on the available device, so no extra .to(device) call is needed
# (combining the two raises an error in recent transformers versions).
model = AutoModelForCausalLM.from_pretrained(
    "ManishThota/Sparrow",
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("ManishThota/Sparrow", trust_remote_code=True)
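# Note: trust_remote_code=True downloads and runs the custom modeling code
# shipped in the ManishThota/Sparrow repository (which presumably provides the
# image_preprocess method used below), so only enable it for checkpoints you trust.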
def predict_answer(image, question, max_tokens):
    # Build the chat-style prompt the model expects.
    text = ("A chat between a curious user and an artificial intelligence assistant. "
            "The assistant gives helpful, detailed, and polite answers to the user's questions. "
            f"USER: <image>\n{question} ASSISTANT:")
    image = image.convert("RGB")
    # Tokenize the prompt and move it to the model's device.
    input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device)
    # Preprocess the image with the model's own (remote-code) preprocessor.
    image_tensor = model.image_preprocess(image)
    # Generate the answer.
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_tokens,
        images=image_tensor,
        use_cache=True)[0]
    # Decode only the newly generated tokens, skipping the prompt.
    return tokenizer.decode(output_ids[input_ids.shape[1]:], skip_special_tokens=True).strip()
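# Minimal local sanity check (hypothetical: assumes a test.jpg sits next to
# this script). Uncomment to exercise the model without the Gradio UI:
# print(predict_answer(Image.open("test.jpg"), "What is in this image?", 50))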
# Thin wrapper so the function signature matches the Gradio inputs below.
def gradio_predict(image, question, max_tokens):
    answer = predict_answer(image, question, max_tokens)
    return answer
# Define the Gradio interface
iface = gr.Interface(
    fn=gradio_predict,
    inputs=[gr.Image(type="pil", label="Upload or Drag an Image"),
            gr.Textbox(label="Question", placeholder="e.g. What are the colors of the bus in the image?", scale=4),
            gr.Slider(2, 100, value=25, label="Max Tokens", info="Choose between 2 and 100")],
    outputs=gr.TextArea(label="Answer"),
    title="Sparrow - Tiny 3B Visual Question Answering",
    description="An interactive chat model that can answer questions about images in an academic context.",
)
# Launch the app with request queuing enabled; debug=True surfaces errors in the console.
iface.queue().launch(debug=True)
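# To run locally (assuming gradio, torch, transformers, and pillow are
# installed): `python app.py`, then open the local URL that Gradio prints.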