working
- .gitignore +2 -1
- agent/_create.py +48 -23
- agent/datastructures.py +14 -10
- agent/memory.py +4 -0
- agent/prompt.py +18 -39
- agent/toolset.py +40 -14
- app.py +4 -1
- requirements.txt +15 -0
- test.py +62 -0
- train/faq.py +8 -6
- train/posts.csv +0 -0
.gitignore
CHANGED
@@ -6,4 +6,5 @@ __pycache__/
 _rise_faq_db/
 
 /_rise_product_db/
-_rise_product_db/
+_rise_product_db/
+_chat_history.sqlite
agent/_create.py
CHANGED
@@ -1,29 +1,54 @@
 
 def agent(payload):
 
-
+    from agent.toolset import tool_executor, converted_tools
+
+    from langchain_openai import ChatOpenAI
+    model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+    model = model.bind_functions(converted_tools)
+
+    from langgraph.prebuilt import ToolInvocation
+    import json
+    from langchain_core.messages import FunctionMessage
+
+    def should_continue(messages):
+        last_message = messages[-1]
+        if "function_call" not in last_message.additional_kwargs: return "end"
+        else: return "continue"
+
+    def call_model(messages):
+        response = model.invoke(messages)
+        return response
+
+    def call_tool(messages):
+        last_message = messages[-1]
+        action = ToolInvocation(
+            tool=last_message.additional_kwargs["function_call"]["name"],
+            tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
+        )
+        response = tool_executor.invoke(action)
+        function_message = FunctionMessage(content=str(response), name=action.tool)
+        return function_message
+
+    from langgraph.graph import MessageGraph, END
+    workflow = MessageGraph()
+
+    workflow.add_node("agent", call_model)
+    workflow.add_node("action", call_tool)
+    workflow.set_entry_point("agent")
+    workflow.add_conditional_edges("agent", should_continue, {"continue": "action", "end": END})
+    workflow.add_edge('action', 'agent')
+
+    from agent.memory import memory, ThreadStatus, threadID
+    app = workflow.compile(checkpointer=memory)
+
 
-    from agent.toolset import tools
     from agent.prompt import prompt
+    prompt = prompt[ThreadStatus]
+    prompt = prompt.format(input="hi! I'm bob")
+
+    response = app.invoke(prompt, {"configurable": {"thread_id": threadID}})
+    print(response[-1].content)
 
-
-
-    from langchain.agents import AgentExecutor, create_react_agent
-    agent = create_react_agent(llm, tools, prompt)
-
-    from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
-
-    memory = ConversationSummaryMemory(
-        llm=OpenAI(),
-        memory_key="chat_summary",
-        buffer=payload.get('chat_summary') or "",
-        return_messages=True
-    )
-
-    agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
-    response = agent_executor.invoke({"input": payload.get('input') or "no question"})
-    response = response['output']
-    response = response.removesuffix("}")
-    response = response + ", \"chat_summary\": \"" + memory.buffer.strip() + "\"}"
-
-    return response
+    return response[-1].content
+
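Note on the new control flow: a compiled MessageGraph is normally driven with a list of chat messages rather than a pre-formatted prompt value, and the thread_id in the config is what ties successive calls to the same checkpoint. A minimal sketch of that call pattern, assuming the `app` and `threadID` defined above (illustrative, not part of the commit):

    # Hypothetical driver for the compiled graph in agent/_create.py.
    from langchain_core.messages import HumanMessage
    from agent.memory import threadID

    inputs = [HumanMessage(content="hi! I'm bob")]
    config = {"configurable": {"thread_id": threadID}}

    # Each invoke loops agent -> action until should_continue() returns "end",
    # appending every message to the checkpointed history for this thread.
    final_state = app.invoke(inputs, config)
    print(final_state[-1].content)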
agent/datastructures.py
CHANGED
@@ -1,5 +1,6 @@
 
-from typing import List
+from typing import List, Optional
+from enum import Enum
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain.output_parsers import PydanticOutputParser
 
@@ -8,20 +9,23 @@ class InputSchema(BaseModel):
     """Expect the input from the frontend to be a JSON object with this structure"""
     question: str = Field(description="The enquiry that is passed from the user")
 
+class ActionTypes(str, Enum):
+    SuggestGoal = "SuggestGoal"
+    SuggestActivity = "VisitPage"
+
 # Define your desired data structure.
 class FrontEndActions(BaseModel):
     """Structure to pass actions back to the frontend"""
-
-
+    heading: str = Field(description="The heading text to display on the button")
+    detail: str = Field(description="More detailed information, for instance explaining why you have chosen this action for the user")
+    id: int = Field(description="The ID of the object that is referenced")
+    type: ActionTypes = Field(description="This should be a string that identifies the type of action. It can be one of: SuggestGoal, SuggestActivity")
 
 class ResponseSchema(BaseModel):
-    """Final response to the question being asked."""
+    """Final response to the question being asked. Some of the fields are optional"""
     message: str = Field(description="final answer to respond to the user")
-
-
-
-    #actions: List[FrontEndActions] = Field(description="List of suggested actions that should be passed back to the frontend to display. The user will click these to enact them.")
-    #tokens: int = Field(description="Count the number of tokens used to produce the response. Omit this field if you do not want to count tokens.")
-    #cost: int = Field(description="Provide the cost of the response based on tokens used. Omit this field if you cannot provide the information reliably")
+    thread_id: int = Field(description="The ID of the checkpointer memory thread that this response is associated with. This is used to keep track of the conversation.")
+    tools: Optional[List[str]] = Field(description="A list of the tools used to generate the response.")
+    actions: Optional[List[FrontEndActions]] = Field(description="List of suggested actions that should be passed back to the frontend to display. The user will click these to enact them.")
 
 parser = PydanticOutputParser(pydantic_object=ResponseSchema)
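The parser at the bottom is what turns the model's JSON reply back into typed objects. A quick sketch of the round trip, with an invented reply string for illustration:

    # Hypothetical round trip through the parser defined above.
    from agent.datastructures import parser, ResponseSchema

    # This is the text that prompt.py injects as {response_format}.
    print(parser.get_format_instructions())

    # Parsing a model reply back into a ResponseSchema instance:
    raw = '{"message": "Hello Bob", "thread_id": 2, "tools": null, "actions": null}'
    result = parser.parse(raw)
    assert isinstance(result, ResponseSchema)
    print(result.message)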
agent/memory.py
ADDED
@@ -0,0 +1,4 @@
+from langgraph.checkpoint.sqlite import SqliteSaver
+memory = SqliteSaver.from_conn_string(":memory:")
+ThreadStatus = 0
+threadID = 2
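Worth noting: from_conn_string(":memory:") keeps checkpoints only for the life of the process, while the _chat_history.sqlite entry added to .gitignore hints at a file-backed store. A sketch of the file-backed variant, assuming the same SqliteSaver API (the filename is taken from .gitignore, not from this file):

    # Hypothetical file-backed checkpointer so threads survive restarts.
    from langgraph.checkpoint.sqlite import SqliteSaver

    memory = SqliteSaver.from_conn_string("_chat_history.sqlite")

    # A later invoke() with the same thread_id resumes from the stored checkpoint.
    config = {"configurable": {"thread_id": 2}}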
agent/prompt.py
CHANGED
@@ -1,45 +1,24 @@
-from langchain_core.prompts import
+from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
 from agent.datastructures import parser
 
-prompt = 
+prompt = {
+    0: # IF THE THREAD IS NEW, THE CHATBOT NEEDS TO BE PUMP-PROMPTED
+    ChatPromptTemplate.from_messages([
+        SystemMessagePromptTemplate.from_template("""
 
-
-You are RiseBot.
-You work for Manchester Metropolitan University's Future Me and Rise offers: supporting students to make the most of the opportunities available to them.
+        You are an assistant that only responds in JSON. Do not write normal text.
 
-
-Please only answer questions about Future me and Rise using the tool provided.
-If you are unable to answer the question, do not make something up - admit it and recommend that they contact [email protected]
+        [no prose][Output only valid JSON]
 
-
-
+        {response_format}
+
+        The thread_id of this conversation is 2.
 
-
-
-
-
-
-
-
-Action: the action to take, should be one of [{tool_names}]
-Action Input: the input to the action
-Observation: the result of the action
-```
-
-When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
-
-```
-Final Answer: [your response here. {response_format}]
-```
-
-Begin!
-
-Previous conversation history:
-{chat_summary}
-
-New input: {input}
-{agent_scratchpad}
-
-""",
-partial_variables={"response_format": parser.get_format_instructions()})
+        """
+
+        ).format(response_format=parser.get_format_instructions()),
+        ("human", "{input}")
+    ]),
+    1: # IF THREAD IS CONTINUING, WE CAN RELY ON THE ORIGINAL PROMPT
+    ChatPromptTemplate.from_messages([("human", "{input}")])
+}
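Usage of the two-entry dict, assuming ThreadStatus is 0 for a new thread and 1 for a continuing one, as agent/_create.py implies (sketch only, not part of the commit):

    # Hypothetical selection of the right template per thread state.
    from agent.prompt import prompt

    ThreadStatus = 0  # new thread: full JSON-only system prompt
    messages = prompt[ThreadStatus].format_messages(input="hi! I'm bob")

    ThreadStatus = 1  # continuing thread: just the human turn
    messages = prompt[ThreadStatus].format_messages(input="what did I say my name was?")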
agent/toolset.py
CHANGED
@@ -3,37 +3,63 @@ from langchain_openai import OpenAIEmbeddings
 from langchain_community.vectorstores.faiss import FAISS
 from langchain.chains import RetrievalQA
 from langchain_openai import OpenAI
-
+from langchain_core.pydantic_v1 import BaseModel, Field
 
 @tool
 def frequently_asked_questions(input: str):
-
+
+    """
+    Provides answers to questions about Rise and Futureme.
+    Please always use this tool if the user has questions.
+    If you cannot answer the query with the tool, then you should recommend they contact [email protected]
+    """
 
     # Load from local storage
     embeddings = OpenAIEmbeddings()
     persisted_vectorstore = FAISS.load_local("_rise_faq_db", embeddings)
 
     # Use RetrievalQA chain for orchestration
-    qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=persisted_vectorstore.as_retriever())
+    qa = RetrievalQA.from_chain_type(llm=OpenAI(model="gpt-3.5-turbo-instruct", temperature=0), chain_type="stuff", retriever=persisted_vectorstore.as_retriever())
     result = qa.invoke(input)
     return result
 
-
-
-    """Recommends an activity from Rise catalogue."""
+class RecommendActivityInput(BaseModel):
+    profile: str = Field(description="should be a pen portrait of the user describing their interests and objectives. If they have a specific thing they are interested in, it should state that")
 
-
+
+@tool("recommend_activity", args_schema=RecommendActivityInput, return_direct=False)
+def recommend_activity(profile: str) -> str:
+
+    """
+    Use this to search the Rise portfolio for relevant activities
+    """
+
+    # Load from local storage
     embeddings = OpenAIEmbeddings()
     persisted_vectorstore = FAISS.load_local("_rise_product_db", embeddings)
 
+    # Set Up LLM
+    from agent.prompt import prompt
+    llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0)
+
     # Use RetrievalQA chain for orchestration
-    qa = RetrievalQA.from_chain_type(llm=
-    result = qa.invoke(
+    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=persisted_vectorstore.as_retriever(), chain_type_kwargs={"prompt": "speak like a pirate"})
+    result = qa.invoke("recommend an activity relevant to the following profile: " + profile)
     return result
 
-
-
-
-
+tools = [frequently_asked_questions]
+
+
+## NEW FROM HERE
+
+from langchain_community.tools.tavily_search import TavilySearchResults
+
+tools = [TavilySearchResults(max_results=1)]
+
+from langgraph.prebuilt import ToolExecutor
+
+tool_executor = ToolExecutor(tools)
+
+from langchain_core.utils.function_calling import convert_to_openai_function
 
-
+converted_tools = [convert_to_openai_function(t) for t in tools]
app.py
CHANGED
@@ -24,4 +24,7 @@ def train_faq():
 @app.route("/train/products", methods=['GET','POST'])
 def train_products():
     from train.products import train
-    return train();
+    return train();
+
+from agent._create import agent
+agent({})
requirements.txt
CHANGED
@@ -14,3 +14,18 @@ sentence-transformers
 datasets
 faiss-cpu
 
+#LLAMA TEST
+torch
+transformers
+langchain
+chromadb
+xformers
+sentence_transformers
+tokenizers
+optimum
+auto-gptq
+unstructured
+
+#LANGGRAPH
+Langgraph
+Operator
test.py
ADDED
@@ -0,0 +1,62 @@
+from dotenv import load_dotenv
+load_dotenv()
+
+from langchain_community.document_loaders.csv_loader import CSVLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_openai import OpenAIEmbeddings, OpenAI, ChatOpenAI
+from langchain_community.vectorstores.faiss import FAISS
+from langchain_community.document_loaders import WebBaseLoader
+from langchain.agents import tool
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores.faiss import FAISS
+from langchain.chains import RetrievalQA
+from langchain_openai import OpenAI
+from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate
+from agent.datastructures import parser
+from langchain.text_splitter import CharacterTextSplitter
+
+
+def train():
+
+    documents = CSVLoader(file_path="train/posts.csv").load()
+
+    # Split document in chunks
+    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
+    docs = text_splitter.split_documents(documents=documents)
+
+    embeddings = OpenAIEmbeddings()
+    # Create vectors
+    vectorstore = FAISS.from_documents(docs, embeddings)
+    # Persist the vectors locally on disk
+    vectorstore.save_local("_rise_product_db");
+
+    print("trained")
+
+def go():
+    # Load from local storage
+    embeddings = OpenAIEmbeddings()
+    persisted_vectorstore = FAISS.load_local("_rise_product_db", embeddings)
+
+    # Set Up LLM
+    from agent.prompt import prompt
+    llm = ChatOpenAI(model="gpt-4", temperature=0)
+
+    prompt = ChatPromptTemplate.from_messages([
+
+        SystemMessagePromptTemplate.from_template("""
+
+        {response_format}
+
+        {context}
+
+        """, partial_variables={"response_format": parser.get_format_instructions()})
+    ])
+
+    # Use RetrievalQA chain for orchestration
+    qa = RetrievalQA.from_chain_type(llm=llm, retriever=persisted_vectorstore.as_retriever(), chain_type_kwargs={"prompt": prompt})
+    profile = "I would like to be a teacher, can you recommend an activity"
+    result = qa.invoke("recommend activities relevant to the following profile. Activities cannot have already begun: " + profile)
+    print(result)
+
+go();
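The stuff chain behind RetrievalQA substitutes the retrieved documents and the user's question into its prompt, so a custom prompt generally needs both {context} and {question} slots; the template in go() only carries {context}. A sketch under that assumption (which would also replace the bare string passed in agent/toolset.py):

    # Hypothetical RetrievalQA-compatible prompt with both expected slots.
    from langchain_core.prompts import PromptTemplate

    qa_prompt = PromptTemplate(
        input_variables=["context", "question"],
        template=(
            "Answer using only the context below.\n\n"
            "{context}\n\n"
            "Question: {question}\n"
        ),
    )

    # qa = RetrievalQA.from_chain_type(llm=llm, retriever=retriever,
    #                                  chain_type_kwargs={"prompt": qa_prompt})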
train/faq.py
CHANGED
@@ -1,16 +1,18 @@
-
 def train():
     from langchain_community.document_loaders.csv_loader import CSVLoader
     from langchain.text_splitter import RecursiveCharacterTextSplitter
     from langchain_openai import OpenAIEmbeddings
     from langchain_community.vectorstores.faiss import FAISS
-    from 
-
-
+    from langchain_community.document_loaders import WebBaseLoader
+
     documents = WebBaseLoader("https://rise.mmu.ac.uk/what-is-rise/").load()
 
     # Split document in chunks
-    text_splitter = RecursiveCharacterTextSplitter(
+    text_splitter = RecursiveCharacterTextSplitter(
+
+        chunk_size=100,
+        chunk_overlap=20
+    )
     docs = text_splitter.split_documents(documents=documents)
 
     embeddings = OpenAIEmbeddings()
@@ -19,4 +21,4 @@ def train():
     # Persist the vectors locally on disk
     vectorstore.save_local("_rise_faq_db");
 
-    return {"trained":"success"}
+    return {"trained":"success"}
train/posts.csv
CHANGED
The diff for this file is too large to render.