# rise_ai / app.py
# (Hugging Face Space viewer artifacts below, commented out so the file parses:)
# markpeace's picture
# messing
# 4b31779
#ESTABLISH THE SERVER
from flask import Flask,request
from dotenv import load_dotenv
# Initializing flask app
app = Flask(__name__)
# Load environment variables from a local .env file (e.g. OPENAI_API_KEY,
# which the LLM client constructed inside the request handler will read).
load_dotenv()
@app.route("/", methods=['GET','POST'])
def index():
    """Answer a user query with a minimal OpenAI-functions agent.

    Accepts an optional ``input`` field (JSON body or query string); when
    absent, falls back to the original hard-coded question "What are you?".
    Returns the agent's final answer as the response body.

    NOTE: heavy langchain imports are kept inside the handler, preserving
    the original script's deferred-import structure.
    """
    from langchain.prompts import PromptTemplate
    from langchain_core.output_parsers import JsonOutputParser
    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_openai import ChatOpenAI
    from langchain.agents import tool, create_openai_functions_agent, AgentExecutor

    class ResponseSchema(BaseModel):
        """Final response to the question being asked"""
        # final answer to respond to the user
        message: str = Field(description="final answer to respond to the user")

    # Parser supplies JSON format instructions that are injected into the prompt.
    parser = JsonOutputParser(pydantic_object=ResponseSchema)
    prompt = PromptTemplate(
        template="""Answer the user query.\n{format_instructions}\n{input}\n{agent_scratchpad}""",
        input_variables=["input"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )

    @tool
    def get_word_length(word: str) -> int:
        """Returns the length of a word."""
        # Fixed: the original stub took no argument and always returned 1,
        # contradicting its own docstring (and the schema sent to OpenAI).
        return len(word)

    tools = [get_word_length]

    # Single LLM instance (the original constructed ChatOpenAI twice).
    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
    agent = create_openai_functions_agent(llm, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    # Let callers supply their own question; default preserves old behaviour.
    payload = request.get_json(silent=True) or {}
    query = payload.get("input") or request.args.get("input") or "What are you?"

    response = agent_executor.invoke({"input": query})
    return response['output']