import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# Model identifier on the Hugging Face Hub
model_name = "ai4bharat/Airavata"
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load the model in 8-bit; device_map="auto" lets accelerate infer the
# device map and dispatch the layers itself, so no manual
# infer_auto_device_map() or model.to() call is needed (an 8-bit
# quantized model cannot be moved with .to() after loading).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
# Define the inference function
def generate_text(prompt):
    # Tokenize the prompt and move the inputs to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Create the Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Airavata Text Generation Model",
    description="This is the AI4Bharat Airavata model for text generation in Indic languages.",
)
# Launch the interface
interface.launch()