#for learning
import os
import openai
import gradio as gr
openai.api_key = os.environ.get('O_APIKey')
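# Note (assumption, not in the original): llama_index's OpenAI integration can also
# pick the key up from the OPENAI_API_KEY environment variable; mirroring it there
# is a harmless fallback if authentication errors appear, e.g.:
# os.environ.setdefault('OPENAI_API_KEY', os.environ.get('O_APIKey') or '')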
#HF_Token = os.environ.get('HF_Token')
Data_Read = os.environ.get('Data_Reader')
ChurnData = os.environ.get('Churn_Data')
ChurnData2 = os.environ.get('Churn_Data2')
#read data
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SummaryIndex, download_loader
DataReader = download_loader(Data_Read)
loader = DataReader()
### 1st file
documents = loader.load_data(file=ChurnData)
### 2nd file (appended to the 1st)
documents2 = loader.load_data(file=ChurnData2)
documents = documents + documents2
#create index
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
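# Optional sketch (not part of the original app): persist the vector index so the
# documents are not re-embedded on every startup. Assumes a writable './storage'
# directory; uses llama_index's storage_context.persist, e.g.:
# index.storage_context.persist(persist_dir='./storage')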
# answer each chat message by querying the index
def reply(message, history):
    answer = str(query_engine.query(message))
    return answer
Conversing = gr.ChatInterface(reply, chatbot=gr.Chatbot(height="70vh"), retry_btn=None, theme=gr.themes.Monochrome(),
                              title='BT Accor Q&A', undo_btn=None, clear_btn=None, css='footer {visibility: hidden}').launch()