from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
import streamlit as st
import os
import time

# ChatOpenAI and OpenAIEmbeddings read OPENAI_API_KEY from the environment;
# it is fetched here so it can also be passed to them explicitly below.
api_key = os.getenv("OPENAI_API_KEY")
# Custom prompt template that guides the model's conversational flow.
custom_template = """<s>[INST] You will start the conversation by greeting the user and introducing yourself as Qanoon-Bot,
stating your availability for legal assistance. Your next step will depend on the user's response.
If the user expresses a need for legal assistance in Pakistan, ask them to describe their case or problem.
After receiving the case or problem details, provide solutions and procedures according to the knowledge base,
along with the related penal codes and procedures.
However, if the user does not require legal assistance in Pakistan, thank them and
say goodbye, ending the conversation. Base your responses on the user's needs, providing accurate and
concise information regarding Pakistani law and rights where applicable. Your interactions should be professional and
focused, ensuring the user's queries are addressed efficiently without deviating from the set flow.
CONTEXT: {context}
CHAT HISTORY: {chat_history}
QUESTION: {question}
ANSWER: [/INST]
"""
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
# embeddings = HuggingFaceEmbeddings(model_name="nomic-ai/nomic-embed-text-v1", model_kwargs={"trust_remote_code": True, "revision": "289f532e14dbbbd5a04753fa58739e9ba766f3c7"})
# vectordb = Chroma.from_documents(texts, embedding=embeddings, persist_directory="./data")
# db_retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": 4})
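# A plausible offline ingestion step for building the "vectordb" index loaded
# below (assumed, not part of this app; the "docs" folder and the chunking
# parameters are placeholders):
#
# from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
# from langchain.text_splitter import RecursiveCharacterTextSplitter
#
# documents = DirectoryLoader("docs", glob="**/*.pdf", loader_cls=PyPDFLoader).load()
# chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(documents)
# FAISS.from_documents(chunks, embeddings).save_local("vectordb")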
# allow_dangerous_deserialization is required because FAISS indexes are
# pickled on disk; only load indexes you built yourself.
db = FAISS.load_local("vectordb", embeddings, allow_dangerous_deserialization=True)
db_retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 4})
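# The retriever hands the 4 most similar chunks to the chain; standalone it
# can be exercised directly, e.g. (illustrative query):
# docs = db_retriever.get_relevant_documents("What is the punishment for theft?")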
st.set_page_config(page_title="Qanoon-Bot")
col1, col2, col3 = st.columns([1, 4, 1])
with col2:
    st.image("https://s3.ap-south-1.amazonaws.com/makerobosfastcdn/cms-assets/Legal_AI_Chatbot.png")
# Hide Streamlit chrome (menu, footer, deploy button) and restyle buttons.
st.markdown(
    """
    <style>
    div.stButton > button:first-child {
        background-color: #ffd0d0;
    }
    div.stButton > button:active {
        background-color: #ff6262;
    }
    div[data-testid="stStatusWidget"] div button {
        display: none;
    }
    .reportview-container {
        margin-top: -2em;
    }
    #MainMenu {visibility: hidden;}
    .stDeployButton {display: none;}
    footer {visibility: hidden;}
    #stDecoration {display: none;}
    button[title="View fullscreen"] {
        visibility: hidden;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
def reset_conversation():
    """Clear the on-screen transcript and the chain's conversation memory."""
    st.session_state.messages = []
    st.session_state.memory.clear()

if "messages" not in st.session_state:
    st.session_state.messages = []
if "memory" not in st.session_state:
    # Window memory keeps only the last k=2 exchanges to bound prompt size.
    st.session_state.memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history", return_messages=True)
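# st.session_state survives Streamlit's top-to-bottom script reruns, so the
# transcript and the memory persist across user messages while everything
# else on this page is rebuilt each turn.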
prompt = PromptTemplate(template=custom_template,
                        input_variables=["context", "question", "chat_history"])
# Other LLM backends from https://python.langchain.com/docs/integrations/llms
# can be swapped in here; this app uses the OpenAI chat API.
llm = ChatOpenAI(temperature=0.2, model_name="gpt-3.5-turbo-0125", openai_api_key=api_key)
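# A Together-hosted model could be used instead, roughly like this (a sketch:
# the model name is a placeholder, and together_api would come from config.py):
#
# from langchain_together import Together
# from config import together_api
# llm = Together(model="mistralai/Mixtral-8x7B-Instruct-v0.1",
#                temperature=0.2, together_api_key=together_api)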
qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=st.session_state.memory,
    retriever=db_retriever,
    combine_docs_chain_kwargs={"prompt": prompt},
)
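# Per turn the chain: (1) condenses the new question against the chat history,
# (2) retrieves the top-k chunks via db_retriever, and (3) "stuffs" them into
# the {context} slot of the prompt above before calling the LLM.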
for message in st.session_state.messages:
    with st.chat_message(message.get("role")):
        st.write(message.get("content"))
input_prompt = st.chat_input("Say something")
if input_prompt:
    with st.chat_message("user"):
        st.write(input_prompt)
    st.session_state.messages.append({"role": "user", "content": input_prompt})

    with st.chat_message("assistant"):
        with st.status("Thinking 💡...", expanded=True):
            result = qa.invoke({"question": input_prompt})
            message_placeholder = st.empty()
            full_response = "**_Note: Information provided by Qanoon-Bot may be inaccurate._** \n\n\n"
            # Simulate token-by-token streaming of the completed answer.
            for chunk in result["answer"]:
                full_response += chunk
                time.sleep(0.02)
                message_placeholder.markdown(full_response + " ▌")
            # Drop the typing cursor once the answer is complete.
            message_placeholder.markdown(full_response)
        st.button("Reset All Chat 🗑️", on_click=reset_conversation)

    st.session_state.messages.append({"role": "assistant", "content": result["answer"]})
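# To run locally (assuming this file is app.py and OPENAI_API_KEY is exported):
#   streamlit run app.py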