import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
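# If this is deployed as a Hugging Face Space, the Space also needs a
# requirements.txt covering the imports above. A minimal sketch (versions
# left unpinned, since none are specified here):
#
#     gradio
#     transformers
#     torch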
# 1. Load model and tokenizer
model_name = "microsoft/DialoGPT-small"  # or any other chat model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# 2. Keep conversation context in a module-level list. This is the simplest
# approach, but note that the history is shared by every user of the app.
chat_history = []
def chatbot(input_text):
    global chat_history

    # Encode the running conversation plus the new user input. DialoGPT
    # expects turns separated by the EOS token, e.g.
    # "Hi!<|endoftext|>Hello there.<|endoftext|>How are you?<|endoftext|>"
    prompt = tokenizer.eos_token.join(chat_history + [input_text]) + tokenizer.eos_token
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Generate a response. max_new_tokens caps only the reply, so generation
    # keeps working even after the encoded history grows past 200 tokens
    # (a fixed max_length=200 would eventually leave no room to generate).
    output = model.generate(input_ids, max_new_tokens=100,
                            pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(output[:, input_ids.shape[-1]:][0],
                                skip_special_tokens=True)

    # Update chat history with both sides of the exchange
    chat_history.append(input_text)
    chat_history.append(response)
    return response
iface = gr.Interface(fn=chatbot,
                     inputs="text",
                     outputs="text",
                     title="AI Girlfriend/Boyfriend Chatbot")
def run_app():
    iface.launch(server_name="0.0.0.0", server_port=7860)

if __name__ == "__main__":
    run_app()
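# Because `chat_history` is global, concurrent visitors would see each
# other's turns. A per-session alternative is sketched below, assuming a
# Gradio 4.x-style gr.ChatInterface where `history` arrives as (user, bot)
# pairs (newer Gradio versions may pass message dicts instead); it would
# replace the gr.Interface/run_app block above rather than run alongside it:
#
# def respond(message, history):
#     # Flatten Gradio's per-session (user, bot) pairs into alternating turns
#     turns = [turn for pair in history for turn in pair]
#     prompt = tokenizer.eos_token.join(turns + [message]) + tokenizer.eos_token
#     input_ids = tokenizer.encode(prompt, return_tensors="pt")
#     output = model.generate(input_ids, max_new_tokens=100,
#                             pad_token_id=tokenizer.eos_token_id)
#     return tokenizer.decode(output[0, input_ids.shape[-1]:],
#                             skip_special_tokens=True)
#
# gr.ChatInterface(respond, title="AI Girlfriend/Boyfriend Chatbot").launch()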