import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import gradio as gr

MODEL_NAME = "X-D-Lab/MindChat-Qwen-1_8B"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
# Qwen's tokenizer ships without a dedicated pad token; fall back to EOS so
# generate() has a valid pad_token_id if it ever receives a padded batch.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# trust_remote_code is required here: Qwen ships custom modeling and
# tokenizer code, including the chat() helper used below.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)
model.generation_config = GenerationConfig.from_pretrained(MODEL_NAME, trust_remote_code=True)
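
# Minimal device-placement sketch (an assumption, not part of the original
# setup): move to GPU when available and switch to inference mode. This
# assumes the ~1.8B model fits in memory at full precision; passing
# torch_dtype=torch.float16 to from_pretrained above roughly halves the
# GPU footprint if it does not.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device).eval()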

def chatbot(input_text, history=None):
    # Avoid a mutable default argument: a shared [] would leak turns
    # between unrelated requests.
    if history is None:
        history = []
    # Qwen's chat() builds the chat-formatted prompt and tokenizes it
    # internally; tokenizing input_text here and forwarding that
    # attention_mask would not match the length of the actual prompt.
    response, history = model.chat(tokenizer, input_text, history=history)
    return response

# Stateless text-in/text-out UI: each request starts from an empty history.
gr.Interface(fn=chatbot, inputs="text", outputs="text", title="MindChat-Qwen").launch()
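
# Alternative sketch (assumption, not in the original): gr.ChatInterface
# threads (message, history) through the callback, so multi-turn context
# survives across requests. Gradio's [user, assistant] history pairs line up
# with the (query, response) tuples Qwen's chat() expects. Use this *instead
# of* the Interface above, since launch() blocks.
# def chat_fn(message, history):
#     response, _ = model.chat(tokenizer, message, history=[tuple(pair) for pair in history])
#     return response
# gr.ChatInterface(fn=chat_fn, title="MindChat-Qwen").launch()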