import gradio as gr
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from accelerate import init_empty_weights, infer_auto_device_map

# Model name on Hugging Face
model_name = "bushai/sar-i-7b"

# Build an empty (meta-device) model from the config only, so the device map
# can be computed without loading any weights into memory
config = AutoConfig.from_pretrained(model_name)
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

# Decide how to split the model between GPU 0 and the CPU
device_map = infer_auto_device_map(empty_model, max_memory={0: "14GB", "cpu": "2GB"})

# Load the real weights, dispatched according to the computed device map
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Tokenize the user input. Note: history and system_message are accepted
    # so the signature matches gr.ChatInterface's additional_inputs, but this
    # simple version generates from the latest message only.
    inputs = tokenizer(message, return_tensors="pt").to(model.device)

    # Generate the model's response; do_sample=True is required for
    # temperature and top_p to have any effect
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # Decode only the newly generated tokens, skipping the echoed prompt
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    return response


# Set up the Gradio chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()
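
# --- Optional: a minimal sketch of a variant of respond() that actually uses
# history and system_message, and streams tokens back to Gradio as they are
# generated. This assumes bushai/sar-i-7b ships a chat template in its
# tokenizer config; if it does not, the prompt would need to be formatted
# manually. To use it, pass respond_streaming to gr.ChatInterface above.
from threading import Thread
from transformers import TextIteratorStreamer


def respond_streaming(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the full conversation in the role/content format expected by
    # tokenizer.apply_chat_template
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Run generate() in a background thread while the streamer yields
    # decoded text pieces as they arrive
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    thread = Thread(
        target=model.generate,
        kwargs=dict(
            **inputs,
            streamer=streamer,
            max_new_tokens=max_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
        ),
    )
    thread.start()

    # gr.ChatInterface accepts generator functions, so yielding the growing
    # partial response updates the chat message in place
    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial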