abhinavztb committed
Commit 3633e01 · 1 Parent(s): e58a10d
Files changed (1)
  1. app.py +36 -0
app.py CHANGED
@@ -1,5 +1,41 @@
import gradio as gr
+ from langchain import PromptTemplate, LLMChain
+ import os
+ from langchain.vectorstores import Chroma
+ from getpass import getpass
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.chains import RetrievalQA
+ from langchain.chat_models import ChatOpenAI

+ model_name = "text-embedding-ada-002"
+ # get openai api key from platform.openai.com
+ OPENAI_API_KEY = token
+
+ OAIembeddings = OpenAIEmbeddings(
+     model=model_name, openai_api_key=OPENAI_API_KEY, disallowed_special=()
+ )
+
+ load_vector_store = Chroma(persist_directory="iupui_openai_store_final", embedding_function=OAIembeddings)
+ prompt_template = """Use the following pieces of information to answer the user's question.
+ If you don't know the answer, just say that you don't know, don't try to make up an answer.
+
+ Context: {context}
+ Question: {question}
+
+ Only return the helpful answer below and nothing else.
+ Helpful answer:
+ """
+ prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question'])
+ retriever = load_vector_store.as_retriever(search_kwargs={"k": 5})
+ llm = ChatOpenAI(temperature=0.7, model='gpt-3.5-turbo', openai_api_key=OPENAI_API_KEY)
+
+ def get_response(input):
+     query = input
+     chain_type_kwargs = {"prompt": prompt}
+     qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True, chain_type_kwargs=chain_type_kwargs, verbose=True)
+     response = qa(query)
+     return (response['result'], response['source_documents'])
+
input = gr.Text(
    label="Prompt",
    show_label=False,
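
Note on the hunk above: OPENAI_API_KEY = token references a name, token, that is never defined in this hunk, so app.py will raise a NameError at import time unless token is assigned elsewhere in the file. Since this commit imports both os and getpass, the key was presumably meant to come from the environment or an interactive prompt. A minimal sketch of the environment-variable route (the environment variable name is an assumption, not something this commit sets up):

import os

# Hypothetical replacement for the "OPENAI_API_KEY = token" line:
# read the key from the environment and fail with a clear message if it is missing.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise RuntimeError("OPENAI_API_KEY is not set; create a key at platform.openai.com and export it.")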
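
A smaller observation: get_response rebuilds the RetrievalQA chain on every call, even though llm, retriever and prompt are all fixed at module level. A sketch of building the chain once and reusing it (same calls as the commit, just hoisted out of the function; not the author's code):

# Build the chain once at startup; each request then only runs the query.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs={"prompt": prompt},
    verbose=True,
)

def get_response(input):
    response = qa(input)
    return response['result'], response['source_documents']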
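
The hunk ends at the gr.Text prompt box that already existed in app.py, so the wiring between get_response and the Gradio UI is not visible here. A hedged sketch of one way to hook it up with gr.Interface; answer_fn is a hypothetical helper (not part of the commit) that flattens the returned source documents into plain text so the tuple maps onto two Text outputs:

# Hypothetical wrapper around get_response for display purposes.
def answer_fn(prompt_text):
    result, sources = get_response(prompt_text)
    source_text = "\n\n".join(doc.page_content for doc in sources)
    return result, source_text

demo = gr.Interface(
    fn=answer_fn,
    inputs=input,
    outputs=[gr.Text(label="Answer"), gr.Text(label="Sources")],
)
demo.launch()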