ubaid committed on
Commit
4507d55
·
verified ·
1 Parent(s): 73cdac7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -61
app.py CHANGED
@@ -1,61 +1,62 @@
"""Conversational RAG chat app: Chroma vector store + Groq LLM + Gradio UI."""
import os

import langchain  # noqa: F401  (kept: present in original file)
import gradio as gr
from langchain.chat_models import ChatOpenAI  # noqa: F401  (kept from original)
from langchain.chains import RetrievalQA, ConversationalRetrievalChain  # RetrievalQA kept from original
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma
from langchain.embeddings import TensorflowHubEmbeddings, HuggingFaceEmbeddings  # Tensorflow one kept from original
from langchain.memory import ConversationBufferMemory

# Embedding model must match the one the ./db index was built with.
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')

# SECURITY FIX: the Groq API key was hard-coded here. A secret committed to a
# repository is leaked and must be revoked/rotated; read it from the
# environment instead (same variable name the later revision uses).
llm = ChatGroq(api_key=os.getenv('groq_api'))

# Re-open the persisted Chroma index from disk.
vector_db = Chroma(persist_directory='./db', embedding_function=embeddings)

# Conversation memory; memory_key must match ConversationalRetrievalChain's
# expected input key.
memory = ConversationBufferMemory(
    memory_key='chat_history',
    return_messages=True,
)

# Prompt handed to the combine-docs step of the chain. (The original also
# built an unused duplicate `prompt_template`; it has been removed.)
prompt = PromptTemplate.from_template(
    """You are an AI assistant that provides accurate and concise answers.
Use the following retrieved documents to answer the question. If you don't know, say "I don't know."

Context:
{context}

Question:
{question}

Answer:
""")

# Top-4 similarity search over the vector store.
retriever = vector_db.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 4},
)

con = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={'prompt': prompt},
)
# NOTE: the original rebound `retriever` with k=3 AFTER the chain was built;
# that reassignment had no effect on `con`, so it has been removed.


def chat(query, history):
    """Gradio ChatInterface callback: answer `query` via the retrieval chain.

    `history` holds prior turns supplied by gr.ChatInterface; the chain also
    keeps its own ConversationBufferMemory. Returns the answer text, or an
    error string if the chain call fails.
    """
    try:
        response = con.run({'question': query, 'chat_history': history})
        return str(response)
    except Exception as e:
        # The original returned a fixed, misleading 'slow connection' string
        # for every failure; surface the actual error so it is diagnosable.
        return f'Sorry, something went wrong: {e}'


app = gr.ChatInterface(
    chat,
    theme=gr.themes.Soft(),
    title='Chat With Nasir Hussain',
    description='I have provide llm Nasir Husssain Youtube Playlist Data Playlist Link Here:https://youtube.com/playlist?list=PLuYWhEqu9a9A7s21UXlZ1yYNPk5ZLfhpH&si=qg8iuts2csW3P4bQ',
)
app.launch(share=True)
 
 
"""Setup: embeddings, Groq LLM, Chroma retriever, and the conversational chain."""
import os

import langchain  # noqa: F401  (kept: present in original file)
import gradio as gr
from langchain.chat_models import ChatOpenAI  # noqa: F401  (kept from original)
from langchain.chains import RetrievalQA, ConversationalRetrievalChain  # RetrievalQA kept from original
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma
from langchain.embeddings import TensorflowHubEmbeddings, HuggingFaceEmbeddings  # Tensorflow one kept from original
from langchain.memory import ConversationBufferMemory

# Embedding model must match the one the ./db index was built with.
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')

# API key comes from the environment variable 'groq_api'; never hard-code
# secrets in source.
llm = ChatGroq(api_key=os.getenv('groq_api'))

# Re-open the persisted Chroma index from disk.
vector_db = Chroma(persist_directory='./db', embedding_function=embeddings)

# Conversation memory; memory_key must match ConversationalRetrievalChain's
# expected input key.
memory = ConversationBufferMemory(
    memory_key='chat_history',
    return_messages=True,
)

# Prompt handed to the combine-docs step of the chain. (The original also
# built an unused duplicate `prompt_template`; it has been removed.)
prompt = PromptTemplate.from_template(
    """You are an AI assistant that provides accurate and concise answers.
Use the following retrieved documents to answer the question. If you don't know, say "I don't know."

Context:
{context}

Question:
{question}

Answer:
""")

# Top-4 similarity search over the vector store.
retriever = vector_db.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 4},
)

con = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={'prompt': prompt},
)
# NOTE: the original rebound `retriever` with k=3 AFTER the chain was built;
# that reassignment had no effect on `con`, so it has been removed.
def chat(query, history):
    """Gradio ChatInterface callback.

    Parameters
    ----------
    query : str
        The user's latest message.
    history : list
        Prior turns supplied by gr.ChatInterface; forwarded to the chain,
        which also keeps its own ConversationBufferMemory.

    Returns
    -------
    str
        The model's answer, or an error message if the chain call fails.
    """
    try:
        # `con` is the module-level ConversationalRetrievalChain built above.
        response = con.run({'question': query, 'chat_history': history})
        return str(response)
    except Exception as e:
        # The original returned a fixed, misleading 'slow connection' string
        # for every failure; surface the actual error so it is diagnosable.
        return f'Sorry, something went wrong: {e}'
# Assemble the Gradio chat UI around the `chat` callback and start it with a
# publicly shareable link.
soft_theme = gr.themes.Soft()
app = gr.ChatInterface(
    chat,
    theme=soft_theme,
    title='Chat With Nasir Hussain',
    description='I have provide llm Nasir Husssain Youtube Playlist Data Playlist Link Here:https://youtube.com/playlist?list=PLuYWhEqu9a9A7s21UXlZ1yYNPk5ZLfhpH&si=qg8iuts2csW3P4bQ',
)
app.launch(share=True)