Spaces: Runtime error

Upload folder using huggingface_hub

Files changed:
- __pycache__/gradio_app.cpython-311.pyc +0 -0
- gradio_app.py +110 -89
- gradio_app_old.py +138 -0
- langchain_qwen.ipynb +391 -0
- langchain_retreival.ipynb +0 -0
__pycache__/gradio_app.cpython-311.pyc
CHANGED
Binary files a/__pycache__/gradio_app.cpython-311.pyc and b/__pycache__/gradio_app.cpython-311.pyc differ
gradio_app.py
CHANGED
@@ -1,39 +1,23 @@
-from threading import Thread
+import os
+import random
+import time
+from threading import Lock, Thread
+from typing import Optional, Tuple

 import gradio as gr
+from langchain.agents import AgentType, Tool, initialize_agent
+from langchain.agents.agent_toolkits import (
+    create_conversational_retrieval_agent, create_retriever_tool)
 from langchain.callbacks.manager import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from langchain.chains import RetrievalQA
+from langchain.chains import ConversationChain, RetrievalQA
+from langchain.chat_models import ChatOpenAI
 from langchain.embeddings import HuggingFaceBgeEmbeddings
-from langchain.llms import HuggingFaceTextGenInference
+from langchain.llms import HuggingFaceTextGenInference, OpenAI
 from langchain.prompts import PromptTemplate
+from langchain.tools import tool
 from langchain.vectorstores import FAISS
-# import torch
-from text_generation import Client, InferenceAPIClient
-
-client = Client("http://20.83.177.108:8080")
-
-
-def run_generation_stream(user_text, f, max_new_tokens, temperature):
-    # Get the model and tokenizer, and tokenize the user text.
-    print('called stream')
-
-    if len(user_text.strip()) == 0:
-        print('blank')
-        gr.Warning('Please enter a question to continue')
-        return
-
-    user_text = f"""You are an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a consice and factually correct manner. Also mention the relevant sections of the law wherever applicable.
-### Input: {user_text}
-### Response: """
-
-    text = ""
-    for response in client.generate_stream(user_text, max_new_tokens=max_new_tokens, repetition_penalty=1.05, temperature=temperature):
-        if not response.token.special:
-            text += response.token.text
-            yield text
-
-    return text
+from pydantic import BaseModel, Field


 def reset_textbox():

@@ -52,7 +36,7 @@ model_norm = HuggingFaceBgeEmbeddings(

 vectordb = FAISS.load_local('faissdb', embeddings=model_norm)
 retriever = vectordb.as_retriever(
-    search_type='similarity', search_kwargs={"k": 5})
+    search_type='similarity', search_kwargs={"k": 2})


 # relating to refer to Indian Penal Code(IPC), CrPC(Code of Criminal Procedure) for most cases and therefore laws

@@ -69,70 +53,107 @@ PROMPT = PromptTemplate(
 )


-def run_generation(query, factual, max_tokens, temperature):
-    print('called non stream')
+class SearchInput(BaseModel):
+    query: str = Field(description="should be a search query in string format")
+
+
+@tool('search', args_schema=SearchInput)
+def search(query: str) -> str:
+    """Useful for retrieving documents related to Indian law."""
+    retriever = vectordb.as_retriever(
+        search_type='similarity', search_kwargs={"k": 2})
+    res = retriever.get_relevant_documents(query)
+    print(res)
+    return res
+
+
+def load_chain():
+    # tool = create_retriever_tool(
+    #     retriever,
+    #     "search_legal_sections",
+    #     "Searches and returns documents regarding Indian law. Accepts query as a string. For example: 'Section 298 of Indian Penal Code'."
+    # )
+    tools = [search]
+    llm = ChatOpenAI(openai_api_base='http://20.83.177.108:8080/v1',
+                     openai_api_key='none',)

-    llm = HuggingFaceTextGenInference(
-        inference_server_url="http://20.83.177.108:8080/",
-        max_new_tokens=max_tokens,
-        top_k=10,
-        top_p=0.95,
-        typical_p=0.95,
-        temperature=temperature,
-        streaming=True if factual else False,
-        # repetition_penalty=1.1,
+    conv_agent_executor = create_conversational_retrieval_agent(
+        llm, tools, verbose=True,
     )
+    return conv_agent_executor

-    qa_chain = RetrievalQA.from_chain_type(llm=llm,
-                                           chain_type_kwargs={
-                                               "prompt": PROMPT},
-                                           retriever=retriever,
-                                           return_source_documents=True,
-                                           )

-    # text = ""
-    # if factual:
-    #     response = llm(query, callbacks=[StreamingStdOutCallbackHandler()])
-    #     print(response)
-    #     # text += response
-    #     yield response
+class ChatWrapper:
+    def __init__(self):
+        self.lock = Lock()

-    # else:
-    llm_response = qa_chain(query)
-    print(llm_response['result'])
-    return llm_response['result']
+    def __call__(
+        self, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
+    ):
+        """Execute the chat functionality."""
+        self.lock.acquire()
+        try:
+            history = history or []
+            # Run chain and append input.
+            # output = chain({'input': inp})
+            output = 'this is an output'
+            history.append((inp, output))
+        except Exception as e:
+            raise e
+        finally:
+            self.lock.release()
+        return history, history


-with gr.Blocks() as demo:
+chat = ChatWrapper()
+
+block = gr.Blocks(css=".gradio-container {background-color: red}")
+
+with block:
+    chatbot = gr.Chatbot()
+
     with gr.Row():
-        with gr.Column(scale=4):
-            user_text = gr.Textbox(
-                placeholder="What is the punishment for taking dowry. explain in detail.",
-                label="Question"
-            )
-            model_output = gr.Textbox(
-                label="AI Response", lines=10, interactive=False)
-            button_submit = gr.Button(value="Submit")
-
-        with gr.Column(scale=1):
-            max_new_tokens = gr.Slider(
-                minimum=1, maximum=1000, value=250, step=10, interactive=True, label="Number of words to generate",
-            )
-            temperature = gr.Slider(
-                minimum=0.1, maximum=1.0, value=0.6, step=0.1, interactive=True, label="Randomness(can be between 0-1, 0 being least random)",
-            )
-            factual = gr.Checkbox(
-                label='Turn on to get factually correct answers')
-
-    # user_text.submit(run_generation, [
-    #     user_text, top_p, temperature, top_k, max_new_tokens], model_output)
-    # button_submit.click(run_generation, [
-    #     user_text, top_p, temperature, top_k, max_new_tokens], model_output)
-
-    # user_text.submit(run_generation, [
-    #     user_text, factual, max_new_tokens, temperature], model_output)
-    print('fac', factual.value)
-    button_submit.click(run_generation if factual.value else run_generation_stream, [
-        user_text, factual, max_new_tokens, temperature], model_output)
-
-demo.queue(max_size=32).launch(enable_queue=True)
+        message = gr.Textbox(
+            label="What's your question?",
+            placeholder="What's the answer to life, the universe, and everything?",
+            lines=1,
+        )
+        submit = gr.Button(value="Send", variant="secondary").style(
+            full_width=False)
+
+    gr.Examples(
+        examples=[
+            "Hi! How's it going?",
+            "What should I do tonight?",
+            "Whats 2 + 2?",
+        ],
+        inputs=message,
+    )
+
+    state = gr.State()
+    agent_state = gr.State()
+
+    load_chain()
+
+    submit.click(chat, inputs=[message,
+                 state, agent_state], outputs=[chatbot, state])
+    message.submit(chat, inputs=[
+                   message, state, agent_state], outputs=[chatbot, state])
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox()
+    clear = gr.ClearButton([msg, chatbot])
+    chain = load_chain()
+
+    def respond(message, chat_history):
+        print('message is', message)
+        bot_message = chain({'input': message})['output']
+        chat_history.append((message, bot_message))
+        time.sleep(2)
+        return "", chat_history
+
+    msg.submit(respond, [msg, chatbot], [msg, chatbot])
+
+if __name__ == "__main__":
+    demo.launch()
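Note on the new wiring (not part of the commit): the first Blocks (block) registers chat, whose __call__ still returns a placeholder string, while the second Blocks (demo) is the one launched and actually queries the agent. A minimal sketch of how the agent returned by load_chain() is exercised, mirroring the respond callback above; the example question is hypothetical and it assumes the OpenAI-compatible endpoint at http://20.83.177.108:8080/v1 is reachable:

chain = load_chain()
# create_conversational_retrieval_agent returns an AgentExecutor; calling it with an
# {'input': ...} dict runs the chat model, which may invoke the `search` tool for context.
result = chain({'input': 'What is the punishment under Section 420 of the Indian Penal Code?'})
print(result['output'])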
gradio_app_old.py
ADDED
@@ -0,0 +1,138 @@
+from threading import Thread
+
+import gradio as gr
+from langchain.callbacks.manager import CallbackManager
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from langchain.chains import RetrievalQA
+from langchain.embeddings import HuggingFaceBgeEmbeddings
+from langchain.llms import HuggingFaceTextGenInference
+from langchain.prompts import PromptTemplate
+from langchain.vectorstores import FAISS
+# import torch
+from text_generation import Client, InferenceAPIClient
+
+client = Client("http://20.83.177.108:8080")
+
+
+def run_generation_stream(user_text, f, max_new_tokens, temperature):
+    # Get the model and tokenizer, and tokenize the user text.
+    print('called stream')
+
+    if len(user_text.strip()) == 0:
+        print('blank')
+        gr.Warning('Please enter a question to continue')
+        return
+
+    user_text = f"""You are an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a concise and factually correct manner. Also mention the relevant sections of the law wherever applicable.
+### Input: {user_text}
+### Response: """
+
+    text = ""
+    for response in client.generate_stream(user_text, max_new_tokens=max_new_tokens, repetition_penalty=1.05, temperature=temperature):
+        if not response.token.special:
+            text += response.token.text
+            yield text
+
+    return text
+
+
+def reset_textbox():
+    return gr.update(value='')
+
+
+model_name = "BAAI/bge-base-en"
+# set True to compute cosine similarity
+encode_kwargs = {'normalize_embeddings': True}
+
+model_norm = HuggingFaceBgeEmbeddings(
+    model_name=model_name,
+    encode_kwargs=encode_kwargs
+)
+
+
+vectordb = FAISS.load_local('faissdb', embeddings=model_norm)
+retriever = vectordb.as_retriever(
+    search_type='similarity', search_kwargs={"k": 5})
+
+
+# relating to refer to Indian Penal Code(IPC), CrPC(Code of Criminal Procedure) for most cases and therefore laws
+prompt_template = """You are an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a factually correct and consise manner unless asked for a detailed explanation. Assume the query is asked by a common man unless explicitly specified otherwise, therefore no special acts or laws like ones for railway , army , police would apply to them. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+
+{context}
+
+Question: {question}
+Response:"""
+
+
+PROMPT = PromptTemplate(
+    template=prompt_template, input_variables=["context", "question"]
+)
+
+
+def run_generation(query, factual, max_tokens, temperature):
+    print('called non stream')
+
+    llm = HuggingFaceTextGenInference(
+        inference_server_url="http://20.83.177.108:8080/",
+        max_new_tokens=max_tokens,
+        top_k=10,
+        top_p=0.95,
+        typical_p=0.95,
+        temperature=temperature,
+        streaming=True if factual else False,
+        # repetition_penalty=1.1,
+    )
+
+    qa_chain = RetrievalQA.from_chain_type(llm=llm,
+                                           chain_type_kwargs={
+                                               "prompt": PROMPT},
+                                           retriever=retriever,
+                                           return_source_documents=True,
+                                           )
+
+    # text = ""
+    # if factual:
+    #     response = llm(query, callbacks=[StreamingStdOutCallbackHandler()])
+    #     print(response)
+    #     # text += response
+    #     yield response
+
+    # else:
+    llm_response = qa_chain(query)
+    print(llm_response['result'])
+    return llm_response['result']
+
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column(scale=4):
+            user_text = gr.Textbox(
+                placeholder="What is the punishment for taking dowry. explain in detail.",
+                label="Question"
+            )
+            model_output = gr.Textbox(
+                label="AI Response", lines=10, interactive=False)
+            button_submit = gr.Button(value="Submit")
+
+        with gr.Column(scale=1):
+            max_new_tokens = gr.Slider(
+                minimum=1, maximum=1000, value=250, step=10, interactive=True, label="Number of words to generate",
+            )
+            temperature = gr.Slider(
+                minimum=0.1, maximum=1.0, value=0.6, step=0.1, interactive=True, label="Randomness(can be between 0-1, 0 being least random)",
+            )
+            factual = gr.Checkbox(
+                label='Turn on to get factually correct answers')
+
+    # user_text.submit(run_generation, [
+    #     user_text, top_p, temperature, top_k, max_new_tokens], model_output)
+    # button_submit.click(run_generation, [
+    #     user_text, top_p, temperature, top_k, max_new_tokens], model_output)
+
+    # user_text.submit(run_generation, [
+    #     user_text, factual, max_new_tokens, temperature], model_output)
+    print('fac', factual.value)
+    button_submit.click(run_generation if factual.value else run_generation_stream, [
+        user_text, factual, max_new_tokens, temperature], model_output)
+
+demo.queue(max_size=32).launch(enable_queue=True)
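A note on gradio_app_old.py (not part of the commit): the expression run_generation if factual.value else run_generation_stream reads the checkbox's initial value once, while the UI is being built, so toggling the checkbox at runtime never switches handlers. A sketch of the usual pattern, branching inside a single callback on the value received at click time; the generate wrapper below is hypothetical and simply reuses the names defined in the file above:

def generate(user_text, factual, max_new_tokens, temperature):
    # `factual` here is the checkbox value sent with the click event, not the build-time default.
    if factual:
        yield run_generation(user_text, factual, max_new_tokens, temperature)
    else:
        # run_generation_stream is a generator; re-yield its partial outputs to keep streaming.
        yield from run_generation_stream(user_text, factual, max_new_tokens, temperature)


button_submit.click(generate, [user_text, factual, max_new_tokens, temperature], model_output)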
langchain_qwen.ipynb
ADDED
@@ -0,0 +1,391 @@
(The added file is a Jupyter notebook; the raw JSON wrapper is omitted below and the notebook's cells are reproduced in order.)

Cell 1 (code):

# from langchain.vectorstores import Chroma
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader

Cell 2 (code):

from langchain.embeddings import HuggingFaceBgeEmbeddings

model_name = "BAAI/bge-base-en"
# set True to compute cosine similarity
encode_kwargs = {'normalize_embeddings': True}

model_norm = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs={'device': 'cpu'},
    encode_kwargs=encode_kwargs
)

Cell 3 (code):

vectordb = FAISS.load_local('faissdb', embeddings=model_norm)

Cell 4 (code):

retriever = vectordb.as_retriever(search_type='similarity', search_kwargs={"k": 2})
a = retriever.get_relevant_documents('Indian Penal Code 133')
print([aa.metadata for aa in a])
# a

Cell 5 (code):

from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(openai_api_base='http://20.83.177.108:8080/v1',
                 openai_api_key='none',)

Cell 6 (markdown): SIMPLE RETRIEVER

Cell 7 (code):

from langchain.prompts import PromptTemplate
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import AgentTokenBufferMemory
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.schema.messages import SystemMessage
from langchain.prompts import MessagesPlaceholder
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.tools import tool
from pydantic import BaseModel, Field

memory = ConversationBufferMemory(
    memory_key="chat_history", return_messages=True)

# This is needed for both the memory and the prompt

# memory_key = "history"
# memory = AgentTokenBufferMemory(memory_key=memory_key, llm=llm)


prompt_template = """You are an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a consice and factually correct manner. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}
Response:"""


PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)


class SearchInput(BaseModel):
    query: str = Field(description="should be a search query in string format")


@tool('search', args_schema=SearchInput)
def search(query: str) -> str:
    """Useful for retrieving documents related to Indian law."""
    retriever = vectordb.as_retriever(
        search_type='similarity', search_kwargs={"k": 2})
    res = retriever.get_relevant_documents(query)
    print(res)
    return res


tool = create_retriever_tool(
    retriever,
    "search_legal_sections",
    "Searches and returns documents regarding Indian legal acts and sections."
)
tools = [tool]

Cell 8 (code):

print(type(tool), type(search))

Cell 9 (markdown): QA Chain

Cell 10 (code):

qa_chain = RetrievalQA.from_chain_type(llm=llm,
                                       chain_type_kwargs={"prompt": PROMPT},
                                       retriever=retriever,
                                       return_source_documents=False,
                                       )

conv_qa_chain = ConversationalRetrievalChain.from_llm(
    llm, retriever, memory=memory, verbose=True,
)

Cell 11 (markdown): ReAct

Cell 12 (code; the stored output is a NameError traceback, "name 'HuggingFaceTextGenInference' is not defined", apparently stale output from an earlier run of the cell without the import):

from langchain.llms import HuggingFaceTextGenInference

llm = HuggingFaceTextGenInference(
    inference_server_url="http://20.83.177.108:8080/",
    max_new_tokens=2000,
    # top_k=10,
    # top_p=0.95,
    # typical_p=0.95,
    # temperature=0.6,
    # repetition_penalty=1.1,
)

Cell 13 (code):

from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType


agent_executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)

conv_agent_executor = create_conversational_retrieval_agent(
    llm, tools, verbose=True,
)

Cell 14 (code):

# {'input': 'How is section 308 of Indian Penal Code different from section 299?'}
conv_agent_executor(
    {'input': 'Sorry i meant 299.'}
)

Cell 15 (markdown): Flare

Cell 16 (code; the stored output is a ValidationError traceback: "1 validation error for OpenAI ... Did not find openai_api_key, please add an environment variable `OPENAI_API_KEY` which contains it, or pass `openai_api_key` as a named parameter", raised from FlareChain.from_llm, which constructs its own OpenAI response model):

# We set this so we can see what exactly is going on
from langchain.chains import FlareChain
import langchain

langchain.verbose = True


# flare = FlareChain.from_llm(
#     llm,
#     retriever=retriever,
#     max_generation_len=164,
#     min_prob=0.3,
# )

query = "explain in great detail the difference between the langchain framework and baby agi"
print(llm)

Cell 17 (markdown): Plan and Execute

Cell 18 (code):

from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
from langchain.llms import OpenAI
from langchain.utilities import SerpAPIWrapper
from langchain.agents.tools import Tool
from langchain.chains import LLMMathChain

planner = load_chat_planner(llm)
executor = load_agent_executor(llm, [search], verbose=True)
plan_agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)


plan_agent.run('I bought a house in 2001 for 20 lakh rupees , i sold it in 2022 for 50 lakhs , what will be my profit?')

Cell 19 (markdown): DOCSTORE

Cell 20 (code):

from langchain.llms import OpenAI
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.agents.react.base import DocstoreExplorer


docstore = DocstoreExplorer(vectordb)
tools = [
    Tool(
        name="Search",
        func=docstore.search,
        description="useful for when you need to ask with search",
    ),
    Tool(
        name="Lookup",
        func=docstore.lookup,
        description="useful for when you need to ask with lookup",
    ),
]


react = initialize_agent(
    tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)

Cell 21 (code): empty

Notebook metadata: Python 3 kernel, Python 3.11.2, nbformat 4.
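A note on the Flare cell above (not part of the commit): the captured ValidationError comes from FlareChain.from_llm, which internally builds its own OpenAI response model and therefore looks for OPENAI_API_KEY regardless of the ChatOpenAI passed in. One way to point that internal model at the same self-hosted endpoint is via environment variables, sketched below; this assumes the server speaks the OpenAI API, and FLARE may still fail if the endpoint does not return logprobs:

import os

from langchain.chains import FlareChain
from langchain.chat_models import ChatOpenAI

# Both variables are read by the OpenAI client that FlareChain constructs internally.
os.environ["OPENAI_API_KEY"] = "none"
os.environ["OPENAI_API_BASE"] = "http://20.83.177.108:8080/v1"

flare = FlareChain.from_llm(
    ChatOpenAI(openai_api_base="http://20.83.177.108:8080/v1", openai_api_key="none"),
    retriever=retriever,
    max_generation_len=164,
    min_prob=0.3,
)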
langchain_retreival.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
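The contents of langchain_retreival.ipynb are not recoverable from this page. For orientation only, a minimal sketch of how a FAISS index like the 'faissdb' loaded above is typically built with the LangChain classes imported in langchain_qwen.ipynb; the corpus path, glob, and chunk sizes below are illustrative assumptions, not the notebook's actual values:

from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

# Illustrative corpus location; the real documents live in the raw notebook.
loader = DirectoryLoader('legal_docs/', glob='**/*.pdf', loader_cls=PyPDFLoader)
docs = loader.load()

# Split long statutes into overlapping chunks before embedding.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(docs)

embeddings = HuggingFaceBgeEmbeddings(model_name="BAAI/bge-base-en",
                                      encode_kwargs={'normalize_embeddings': True})
vectordb = FAISS.from_documents(chunks, embeddings)
vectordb.save_local('faissdb')  # later reloaded with FAISS.load_local('faissdb', embeddings=...)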