import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# 1. Model Configuration
@st.cache_resource  # Cache so the model is not reloaded on every Streamlit rerun
def load_model():
    try:
        # bitsandbytes 4-bit quantization requires CUDA and does not run on
        # Apple Silicon, so load the model in half precision on MPS instead
        model = AutoModelForCausalLM.from_pretrained(
            "microsoft/Phi-3-mini-4k-instruct",  # must match the tokenizer and the UI below
            device_map="mps",  # Force the Metal (MPS) backend
            torch_dtype=torch.float16,  # Half precision is well supported on MPS
            trust_remote_code=True
        )
        tokenizer = AutoTokenizer.from_pretrained(
            "microsoft/Phi-3-mini-4k-instruct"
        )
        return model, tokenizer
    except Exception as e:
        st.error(f"Error loading the model: {str(e)}")
        return None, None
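
# Note: Phi-3-mini has ~3.8B parameters, so its fp16 weights alone occupy
# roughly 7.6 GB of unified memory; 8 GB Macs may need a smaller model.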

# 2. Streamlit Interface
st.title("🤖 Chatbot Optimized for M1")
st.markdown("Using Microsoft Phi-3-mini - [Hugging Face](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)")

# 3. Session Initialization
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Hi! I'm your AI assistant. How can I help you?"}
    ]

# 4. Model Loading
model, tokenizer = load_model()
if model is None or tokenizer is None:
    st.stop()  # Halt the script run if the model failed to load

# 5. Generation Function
def generate_response(prompt):
    try:
        messages = [
            {"role": "user", "content": prompt}
        ]
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,  # append the assistant turn marker
            return_tensors="pt"
        ).to(model.device)
        outputs = model.generate(
            inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        # Decode only the newly generated tokens, skipping the prompt
        return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
    except Exception as e:
        return f"Error generating response: {str(e)}"

# 6. User Interaction
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Type your message..."):
    # Show the user's input
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Generate the response
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt)
            st.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})
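
# To try the app locally (assuming this file is saved as app.py and that
# streamlit, torch, transformers, and accelerate are installed):
#   streamlit run app.py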