import gradio as gr
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from accelerate import init_empty_weights, infer_auto_device_map
import torch
# Model name on Hugging Face
model_name = "bushai/sar-i-7b"

# Build an empty (meta) model from the config alone so no weight memory is allocated
config = AutoConfig.from_pretrained(model_name)
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Decide how to split the model between GPU 0 (14 GB) and CPU RAM (2 GB);
# a 7B model only fits in this budget in half precision
device_map = infer_auto_device_map(model, max_memory={0: "14GB", "cpu": "2GB"}, dtype=torch.float16)

# Reload with real weights, sharded across devices according to the computed map
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(model_name)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Tokenize the user input (history and system_message are currently unused;
    # see the chat-aware variant below)
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    # Generate a response; do_sample=True is required for temperature/top_p to have any effect
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return response
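
# respond() above drops `history` and `system_message` on the floor. Below is a
# minimal chat-aware sketch, assuming the model's tokenizer ships a chat template
# (an assumption; not verified for bushai/sar-i-7b). respond_with_history is a
# hypothetical name, not part of the original app.
def respond_with_history(message, history, system_message, max_tokens, temperature, top_p):
    # Assemble the full conversation: system prompt, prior turns, then the new message
    messages = [{"role": "system", "content": system_message}]
    for user_turn, bot_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": message})
    # apply_chat_template formats the conversation the way the model expects
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(
        input_ids,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Return only the newly generated tokens, not the prompt
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
# To use it, pass respond_with_history instead of respond to gr.ChatInterface below.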
# Configure the Gradio chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()
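    # Run with `python app.py`; Gradio serves the UI at http://localhost:7860 by default.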