import gradio as gr
from huggingface_hub import InferenceClient

# Inference client pointed at a self-hosted text-generation endpoint (RunPod proxy URL).
# NOTE(review): the URL is hard-coded and environment-specific — consider moving it to
# configuration; presumably this is a TGI server, since streaming text_generation is used below.
client = InferenceClient(model="https://9bsneexhe83nu6-80.proxy.runpod.net")

def inference(message, history):
    """Stream a model completion for *message*, yielding the growing reply text.

    Gradio's ChatInterface consumes this generator and re-renders the chat
    bubble on every yield, producing a token-by-token streaming effect.

    Args:
        message: The user's latest prompt, sent to the model as-is.
        history: Prior chat turns supplied by Gradio; intentionally unused here
            (each request is answered statelessly — TODO confirm this is desired).

    Yields:
        The accumulated response text after each streamed token.
    """
    token_stream = client.text_generation(
        prompt=message,
        max_new_tokens=512,
        stream=True,
        best_of=1,
        temperature=0.3,
        top_p=0.99,
        do_sample=True,
        repetition_penalty=1.0,
    )
    accumulated = ""
    for chunk in token_stream:
        # A chunk beginning with the "<s>" special token marks content we
        # don't want to surface; stop streaming and end the generator.
        if chunk.startswith("<s>"):
            return accumulated
        accumulated += chunk
        yield accumulated

# Build and launch the chat UI.
# Fix: user-facing strings previously misspelled the product name as
# "SequioaDB"; the examples (and the actual product) spell it "SequoiaDB".
gr.ChatInterface(
    inference,
    chatbot=gr.Chatbot(height=300, scale=7),
    textbox=gr.Textbox(placeholder="你可以问我任何关于SequoiaDB的问题!", container=False, scale=7),
    description="这是SequoiaDB旗下的AI智能大语言模型,训练超过上万条真实数据和7亿参数。",
    title="ChatSDB",
    examples=["SequoiaDB巨杉数据库是什么?", "SequoiaDB巨杉数据库支持哪些类型的数据库实例?"],
    # NOTE(review): retry_btn/undo_btn/clear_btn were removed in Gradio 5 —
    # this script assumes a Gradio 3.x/4.x install; confirm the pinned version.
    retry_btn="重试",
    undo_btn="撤销",
    clear_btn="清除",
    submit_btn="提问",
).queue().launch()