"""Gradio chat app for bushai/sar-i-7b, sharded across GPU and CPU with accelerate."""

import gradio as gr
import torch
from accelerate import infer_auto_device_map, init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_name = "bushai/sar-i-7b"

# Build the model on the meta device from its config alone, so the device map
# can be inferred without loading any weights into memory. Calling
# from_pretrained inside init_empty_weights would defeat the purpose.
config = AutoConfig.from_pretrained(model_name)
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Split layers between GPU 0 and the CPU. fp16 is assumed throughout: a 7B
# model in fp32 (~28 GB) could not fit the 14 GB VRAM + 2 GB RAM budget.
device_map = infer_auto_device_map(model, max_memory={0: "14GB", "cpu": "2GB"}, dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Fold the system message and prior turns into a single prompt string.
    # A generic "User:/Assistant:" layout is assumed here; if the model ships
    # a chat template, tokenizer.apply_chat_template would be more robust.
    prompt = system_message + "\n"
    for user_turn, bot_turn in history:
        prompt += f"User: {user_turn}\nAssistant: {bot_turn}\n"
    prompt += f"User: {message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # do_sample=True is required for temperature and top_p to take effect.
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()