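# Unibot: a retrieval-augmented Gradio chatbot for IUPUI.
# It embeds user questions with OpenAI's text-embedding-ada-002 model, retrieves
# matching chunks from a persisted Chroma vector store, and answers with gpt-3.5-turbo.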
import os

import gradio as gr
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma

# Read the OpenAI API key from the environment (create one at platform.openai.com).
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')

# Embedding model used to build the Chroma store; queries must use the same model.
model_name = "text-embedding-ada-002"

OAIembeddings = OpenAIEmbeddings(
    model=model_name, openai_api_key=OPENAI_API_KEY, disallowed_special=()
)

# Load the persisted Chroma vector store built from IUPUI content.
load_vector_store = Chroma(
    persist_directory="iupui_openai_store_final", embedding_function=OAIembeddings
)

prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""
prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question'])

# Retrieve the 5 most similar chunks for each query.
retriever = load_vector_store.as_retriever(search_kwargs={"k": 5})
llm = ChatOpenAI(temperature=0.7, model='gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)

sample_prompts = [
    "what is HCI?",
    "Tell me more about IUPUI buildings",
    "UITS",
    "How is research at the Computer Science department?",
]
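# Optional sanity check (hypothetical query, not part of the app flow): confirm the
# store returns documents before wiring up the UI.
# docs = retriever.get_relevant_documents("what is HCI?")
# print(len(docs), docs[0].metadata.get('url'))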



def parse_json_result(data):
    """Extract the page content and source URL from each retrieved document."""
    parsed_results = []

    # Iterate over the documents returned by the retriever.
    for document in data['source_documents']:
        page_content = document.page_content
        url = document.metadata['url']
        parsed_results.append({'page_content': page_content, 'url': url})

    return parsed_results
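

# Illustrative (hypothetical) return value of parse_json_result:
# [{'page_content': 'UITS provides technology services ...', 'url': 'https://...'}, ...]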


def get_response(query):
    """Run the retrieval QA chain and return the answer plus its source links."""
    chain_type_kwargs = {"prompt": prompt}
    qa = RetrievalQA.from_chain_type(
        llm=llm, chain_type="stuff", retriever=retriever,
        return_source_documents=True, chain_type_kwargs=chain_type_kwargs, verbose=True,
    )
    response = qa(query)
    # Show one source URL per line in the "Relevant Links" textbox.
    links = "\n".join(doc['url'] for doc in parse_json_result(response))
    return response['result'], links
    
# Input textbox for the user's question.
input = gr.Text(
    label="Prompt",
    show_label=False,
    max_lines=1,
    placeholder="Enter your prompt",
    container=False,
)

# Additional output textbox for the source links of the retrieved documents.
relevant_links_output = gr.Textbox(
    label="Relevant Links",
    placeholder="Links will be displayed here",
)

iface = gr.Interface(
    fn=get_response,
    inputs=input,
    outputs=["text", relevant_links_output],
    title="Unibot",
    description="This is your friendly IUPUI Chatbot",
    examples=sample_prompts,
    allow_flagging="never",
    theme='HaleyCH/HaleyCH_Theme',
)

iface.launch()
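# Optional: pass share=True to iface.launch() to get a temporary public URL
# (useful when running in a notebook); not required for local use.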