# Guest-info retriever block (currently disabled); its imports are kept with it
# import datasets
# from langchain.docstore.document import Document

# Load the dataset
# guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
# docs = [
#     Document(
#         page_content="\n".join([
#             f"Name: {guest['name']}",
#             f"Relation: {guest['relation']}",
#             f"Description: {guest['description']}",
#             f"Email: {guest['email']}"
#         ]),
#         metadata={"name": guest["name"]}
#     )
#     for guest in guest_dataset
# ]

# from langchain_community.retrievers import BM25Retriever
# from langchain.tools import Tool

# bm25_retriever = BM25Retriever.from_documents(docs)

# def extract_text(query: str) -> str:
#     """Retrieves detailed information about gala guests based on their name or relation."""
#     results = bm25_retriever.invoke(query)
#     if results:
#         return "\n\n".join([doc.page_content for doc in results[:3]])
#     else:
#         return "No matching guest information found."

# guest_info_tool = Tool(
#     name="guest_info_retriever",
#     func=extract_text,
#     description="Retrieves detailed information about gala guests based on their name or relation."
# )
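# If the block above is re-enabled, a quick smoke test could look like this
# (Tool exposes .run() / .invoke(); the query is just a hypothetical example):
#
#     print(guest_info_tool.run("Who is the guest related to Ada Lovelace?"))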
#######################################################################################################################################################

from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.graph import START, StateGraph
from langchain_openai import ChatOpenAI
from Webserch_tool import weather_info_tool
from other_tools import (
    wiki_search, arvix_search, web_search, vector_search,
    multiply, add, subtract, divide, modulus, power, square_root
)
import os
from dotenv import load_dotenv

load_dotenv()
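# For reference, a plausible shape for one of the tools imported above, assuming
# (this file does not show it) that other_tools defines them with langchain's
# @tool decorator:
#
#     from langchain_core.tools import tool
#
#     @tool
#     def multiply(a: int, b: int) -> int:
#         """Multiply two numbers."""
#         return a * b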
# Set up the chat model and bind the tools to it
llm = ChatOpenAI(
    temperature=0,
    model="gpt-4o-mini",
    openai_api_key=os.getenv("OPENAI_KEY"),
)

# Note: vector_search is not in this list; the retriever node below calls it directly
tools = [
    weather_info_tool, wiki_search, arvix_search, web_search,
    multiply, add, subtract, divide, modulus, power, square_root
]
chat_with_tools = llm.bind_tools(tools)
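# A minimal sanity check (assumes a valid OPENAI_KEY in .env): a tool-bound model
# returns an AIMessage whose tool_calls list is populated whenever a tool is needed.
#
#     resp = chat_with_tools.invoke([HumanMessage(content="What is 6 times 7?")])
#     print(resp.tool_calls)  # e.g. [{'name': 'multiply', 'args': {'a': 6, 'b': 7}, ...}]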
# Set up the system prompt
system_message = SystemMessage(content="""You are a helpful assistant tasked with answering questions using a set of tools and reference materials.
You may be provided with a reference set of questions and answers from a retriever.
If the current question is identical or semantically equivalent to a reference question, or if a reference answer clearly applies, use that reference answer directly.
Otherwise, reason through the question as needed to determine the correct answer.
Your output must follow these formatting rules:
- If the answer is a number, do not use commas or units (unless specifically requested).
- If the answer is a string, do not use articles, abbreviations, or short forms. Write digits in full unless specified otherwise.
- If the answer is a comma-separated list, apply the above rules to each item and include exactly one space after each comma.
- If the question matches a reference question, return the reference answer exactly as it appears.
Do not include any explanation, prefix, or extra text: output only the final answer.
""")
# Build the agent state and agent graph
from langgraph.graph import MessagesState  # equivalent to the AgentState below

# class AgentState(TypedDict):
#     messages: Annotated[list[AnyMessage], add_messages]

def assistant(state: MessagesState):
    """Assistant node: run the tool-bound model over the accumulated messages."""
    # Return only the new message; the add_messages reducer behind MessagesState appends it
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }
def retriever(state: MessagesState):
    """Retriever node: add the system prompt and, if one exists, a similar Q&A pair."""
    # Assumes the first message in the state is the user's question
    similar_question = vector_search(state["messages"][0].content)
    if similar_question:
        example_msg = HumanMessage(
            content=f"Here is a similar question and answer for reference:\n\n{similar_question}",
        )
        print(f"Similar question found: {similar_question}")
        return {"messages": [system_message] + state["messages"] + [example_msg]}
    else:
        # Handle the case when no similar question is found
        print("No similar question found.")
        return {"messages": [system_message] + state["messages"]}
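# Note on merging: MessagesState uses the add_messages reducer, which matches
# returned messages to existing ones by ID. Re-emitting state["messages"] therefore
# does not duplicate them; only the system prompt and the example are new, and the
# reducer appends new messages after the existing ones in the merged state.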
## The graph
builder = StateGraph(MessagesState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("retriever", retriever)
builder.add_node("tools", ToolNode(tools))

# Define edges: these determine how the control flow moves
builder.add_edge(START, "retriever")
builder.add_edge("retriever", "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message requires a tool, route to tools;
    # otherwise, provide a direct response
    tools_condition,
)
builder.add_edge("tools", "assistant")

alfred = builder.compile()
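# Optional: inspect the compiled topology (START -> retriever -> assistant -> tools loop).
# In recent langgraph versions the compiled graph can render itself as Mermaid:
#
#     print(alfred.get_graph().draw_mermaid())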
# Example run:
# messages = [HumanMessage(content="When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?")]
# # messages = [HumanMessage(content="What is the remainder of 30 divided by 7?")]
# response = alfred.invoke({"messages": messages})
# print(response['messages'][-1].content)