import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the model and the tokenizer
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_response(prompt, max_new_tokens=100):
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=max_new_tokens,
            num_return_sequences=1,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS to avoid a warning
        )
    # Decode only the newly generated tokens, not the prompt we fed in
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    # The model tends to keep role-playing; cut the reply off at the next "Human:" turn
    response = response.split("Human:")[0]
    return response.strip()
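
# Quick sanity check (hypothetical usage, not part of the Space itself):
#   >>> generate_response("Human: Hi there!\nAI:")
# DistilGPT-2 is a small model, so expect short, loose, sometimes incoherent replies.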

def chatbot(message, history):
    history = history or []
    # Build the prompt from the system instruction and the conversation so far
    prompt = "You are a friendly, helpful AI assistant. Answer concisely and coherently.\n\n"
    for human, ai in history:
        prompt += f"Human: {human}\nAI: {ai}\n"
    prompt += f"Human: {message}\nAI:"
    response = generate_response(prompt)
    history.append((message, response))
    return history, history

iface = gr.Interface(
    fn=chatbot,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title="Your AI Companion with DistilGPT-2",
    description="An AI chatbot using the DistilGPT-2 model for simple conversations.",
)

iface.launch()
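
# To run this app locally (a sketch, assuming the usual dependencies are installed):
#   pip install gradio transformers torch
#   python app.py
# Gradio serves the UI at http://127.0.0.1:7860 by default.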