Update app.py
app.py CHANGED
@@ -45,22 +45,25 @@ def get_vectorstore(text_chunks):
     embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-xl")
     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
     return vectorstore
+
 def get_conversation_chain(vectorstore):
-    #
+    # Use HuggingFaceEndpoint with explicitly passed parameters
     llm = HuggingFaceEndpoint(
-        repo_id="google/flan-t5-base",
-        temperature=0.5,
-        max_new_tokens=248
+        repo_id="google/flan-t5-base",
+        temperature=0.5,
+        max_new_tokens=248,  # Explicit parameter
     )
 
+    # Initialize memory
     memory = ConversationBufferMemory(
-        memory_key=
+        memory_key="chat_history", return_messages=True
     )
 
+    # Create Conversational Retrieval Chain
     conversation_chain = ConversationalRetrievalChain.from_llm(
         llm=llm,
         retriever=vectorstore.as_retriever(),
-        memory=memory
+        memory=memory,
     )
     return conversation_chain
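
For context, a minimal usage sketch of the two helpers this hunk touches. It is not part of the commit: the sample chunks and the question are placeholders, the "from app import ..." line assumes the helpers can be imported from app.py without side effects, and HuggingFaceEndpoint expects a Hugging Face token (for example via the HUGGINGFACEHUB_API_TOKEN environment variable).

    # Usage sketch only: placeholder data, not part of the commit.
    from app import get_vectorstore, get_conversation_chain

    text_chunks = [
        "FAISS stores the chunk embeddings for similarity search.",
        "ConversationalRetrievalChain pairs a retriever with chat memory.",
    ]

    vectorstore = get_vectorstore(text_chunks)    # embed the chunks and build the FAISS index
    chain = get_conversation_chain(vectorstore)   # wire LLM, retriever, and memory together

    # The chain takes a "question" key and returns an "answer" key; the
    # ConversationBufferMemory keeps "chat_history" across calls.
    result = chain.invoke({"question": "What does FAISS store here?"})
    print(result["answer"])

On older LangChain releases the chain is called directly, e.g. chain({"question": ...}), instead of through invoke().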