# NOTE(review): the three lines below were page-scrape residue from the
# hosting UI ("Spaces:" / "Runtime error" banners), not Python code.
# They are preserved as comments so the file parses.
# Spaces:
# Runtime error
# Runtime error
import asyncio
import os
from typing import Optional, List

import async_timeout
import gradio as gr
import httpx
from dotenv import load_dotenv
from gpt_index import ServiceContext, GPTSimpleVectorIndex, LLMPredictor, QuestionAnswerPrompt, RefinePrompt
from gpt_index.evaluation.base import DEFAULT_REFINE_PROMPT
from gpt_index.llm_predictor.chatgpt import ChatGPTLLMPredictor
from gpt_index.output_parsers import LangchainOutputParser
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT_TMPL, DEFAULT_REFINE_PROMPT_TMPL
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from loguru import logger
from pydantic import BaseModel
# Load environment variables from a local .env file so the OpenAI key is
# available to the langchain/gpt_index clients below.
load_dotenv()
API_KEY = os.getenv("OPENAI_API_KEY")  # may be None if .env is missing — TODO confirm callers handle that
class Message(BaseModel):
    """A single chat turn: who spoke (``role``) and what they said (``content``)."""

    # e.g. "user" or the bot name; free-form string, not an enum
    role: str
    # the message text
    content: str
# Load the serialized vector index from disk. Use a context manager so the
# file handle is closed promptly (the original left it open for the process
# lifetime).
with open('index.json', "r", encoding='utf-8') as _index_file:
    index_content = _index_file.read()
# Deterministic ChatGPT predictor (temperature=0) backing the index queries.
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=512)
index = GPTSimpleVectorIndex.load_from_string(index_content, service_context=service_context)
# Question-answering prompt template used for every query.
QA_PROMPT_TMPL = DEFAULT_TEXT_QA_PROMPT_TMPL
QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)
async def get_message(query_str):
    """Flatten the chat history into one transcript and start an index query.

    Args:
        query_str: chat history as a list of ``{"role": ..., "content": ...}``
            dicts (despite the name, this is the whole history, not a string).

    Returns:
        The *unawaited* coroutine produced by ``index.aquery`` — the caller is
        responsible for awaiting it to obtain the response object.
    """
    # join() instead of += in a loop: linear instead of quadratic string build.
    full_query = "".join(
        f'{message["role"]}: {message["content"]}\n' for message in query_str
    )
    # Suffix asks the model to answer in Vietnamese (runtime string kept verbatim).
    return index.aquery(full_query + '. Trả lời bằng tiếng Việt',
                        text_qa_template=QA_PROMPT)
async def predict(input, history):
    """Append the user's message, query the index, and return the transcript.

    Args:
        input: the user's new message text. (Name shadows the builtin but is
            kept for interface compatibility with the Gradio wiring.)
        history: mutable list of ``{"role", "content"}`` dicts; mutated in place.

    Returns:
        ``(messages, history)`` where ``messages`` is a list of
        ``(user_text, bot_text)`` tuples for the Gradio Chatbot component.
    """
    history.append({"role": "user", "content": input})
    # get_message returns an unawaited index.aquery coroutine; await it here
    # to actually run the query. (Two awaits: one for get_message, one for
    # the query itself.)
    task = await get_message(history)
    task_result = await task
    history.append({"role": "Xuka", "content": task_result.response})
    # Pair consecutive (user, bot) entries for display; assumes strict
    # user/bot alternation in history.
    messages = [(history[i]["content"], history[i + 1]["content"])
                for i in range(0, len(history) - 1, 2)]
    return messages, history
""" | |
Gradio Blocks low-level API that allows to create custom web applications (here our chat app) | |
""" | |
# Build and launch the chat UI: a Chatbot display, hidden session state
# holding the raw history, and a textbox that submits to predict().
with gr.Blocks() as demo:
    logger.info("Starting Demo...")
    chatbot = gr.Chatbot(label="WebGPT")
    # Per-session chat history (list of role/content dicts) threaded through predict().
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
    # Enter in the textbox calls predict(txt, state) -> (chatbot, state).
    txt.submit(predict, [txt, state], [chatbot, state])
demo.launch()