Commit: abcd
Files changed:
- chains/openai_model.py (+5 -7)
- requirements.txt (+1 -0)
chains/openai_model.py
CHANGED
@@ -6,7 +6,7 @@ from langchain.prompts import PromptTemplate
 from config import TIMEOUT_STREAM
 from vector_db import upload_file
 from callback import StreamingGradioCallbackHandler
-from queue import SimpleQueue, Empty
+from queue import SimpleQueue, Empty, Queue
 from threading import Thread
 from utils import history_file_path, load_lasted_file_username, add_source_numbers, add_details
 from chains.custom_chain import CustomConversationalRetrievalChain
@@ -197,25 +197,24 @@ class OpenAIModel:
         status_text = "Request URL: " + OPENAI_API_BASE
         yield chatbot, status_text
         # Create a function to call - this will run in a thread
-
         # Create a Queue object
         response_queue = SimpleQueue()
+
         def task():
             # Conversation + Retrieval Chain
             qa = CustomConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(k=5),
-                condense_question_llm = condense_llm, verbose=True,
+                condense_question_llm = condense_llm, verbose=True,
                 condense_question_prompt=condense_prompt,
                 combine_docs_chain_kwargs={"prompt": qa_prompt},
                 return_source_documents=True)
             # query with input and chat history
             response = qa({"question": inputs, "chat_history": self.history})
-            # Put response in the queue
             response_queue.put(response)
             q.put(job_done)


         thread = Thread(target=task)
-        thread.start()
+        thread.start()
         chatbot.append((inputs, ""))
         content = ""
         while True:
@@ -228,9 +227,8 @@ class OpenAIModel:
                 yield chatbot, status_text
             except Empty:
                 continue
-
+
         # add citation info to response
-        # Get the response from the queue
         response = response_queue.get()
         relevant_docs = response["source_documents"]
         reference_results = [d.page_content for d in relevant_docs]
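Taken together, the Python change runs the chain call inside a background task() thread: the final result comes back through a SimpleQueue, while the Gradio loop keeps yielding streamed tokens until the job_done sentinel arrives. (Several -/+ pairs above differ only in whitespace, which this view doesn't preserve.) Below is a minimal, self-contained sketch of that producer/consumer pattern; the names stream_answer and token_queue and the fake token source are illustrative stand-ins, not the Space's actual code.

```python
# Minimal sketch of the thread + queue streaming pattern in this commit.
# Names (stream_answer, token_queue) and the fake tokens are stand-ins.
from queue import SimpleQueue, Empty
from threading import Thread

job_done = object()  # sentinel: the worker thread is finished

def stream_answer(question):
    token_queue = SimpleQueue()     # plays the role of the callback's q
    response_queue = SimpleQueue()  # carries the final chain result

    def task():
        # Stand-in for qa({"question": inputs, "chat_history": ...}):
        # emit a few fake tokens, then hand back the full response.
        answer = ""
        for tok in ["Hello", ", ", "world", "!"]:
            token_queue.put(tok)
            answer += tok
        response_queue.put({"answer": answer, "source_documents": []})
        token_queue.put(job_done)   # tell the consumer to stop polling

    Thread(target=task).start()

    content = ""
    while True:
        try:
            item = token_queue.get(timeout=0.1)
        except Empty:
            continue  # nothing yet; keep the UI loop alive
        if item is job_done:
            break
        content += item
        yield content  # the real code yields (chatbot, status_text)

    # Streaming finished: the complete response (with sources) is ready.
    response = response_queue.get()
    yield content + " [sources: %d]" % len(response["source_documents"])

for partial in stream_answer("hi"):
    print(partial)
```

SimpleQueue fits this shape well: exactly one producer and one consumer touch each queue, and it avoids the task-tracking machinery (task_done/join) that Queue carries.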
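The last hunk is where citations come from: after streaming ends, the full response is pulled off response_queue and its source_documents are flattened into reference snippets. How utils.add_source_numbers formats them isn't shown in this diff, so the numbering in the sketch below is an assumption.

```python
# Sketch of turning retrieved documents into numbered references.
# Doc is a stand-in for langchain's Document; the real formatting lives in
# utils.add_source_numbers, which this diff doesn't show.
from dataclasses import dataclass

@dataclass
class Doc:
    page_content: str

response = {"source_documents": [Doc("First chunk."), Doc("Second chunk.")]}
relevant_docs = response["source_documents"]
reference_results = [d.page_content for d in relevant_docs]

numbered = ["[%d] %s" % (i + 1, text)
            for i, text in enumerate(reference_results)]
print("\n".join(numbered))
```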
requirements.txt
CHANGED
@@ -6,4 +6,5 @@ gradio_client==0.2.7
 tiktoken
 pinecone-client
 google-api-python-client
+bs4
 facebook-page-scraper
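On the dependency side, the commit adds bs4 (BeautifulSoup 4) to requirements.txt. Nothing in the diff shows where it is imported; a plausible use is cleaning scraped HTML before upload to the vector store, so the snippet below is only a generic usage sketch.

```python
# Generic BeautifulSoup usage; where the Space actually calls bs4 is not
# visible in this diff.
from bs4 import BeautifulSoup

html = "<html><body><p>Hello <b>world</b>!</p></body></html>"
soup = BeautifulSoup(html, "html.parser")
print(soup.get_text())  # -> Hello world!
```

Note that bs4 on PyPI is a thin shim that pulls in beautifulsoup4; pinning beautifulsoup4 directly is the more conventional spelling.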