import sys
print("Python version")
print(sys.version)
from typing import Annotated, Sequence, TypedDict
import operator
import functools
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_experimental.tools import PythonREPLTool
from langchain.agents import create_openai_tools_agent
from langchain_huggingface import HuggingFacePipeline
from langgraph.graph import StateGraph, END
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# SETUP: HuggingFace Model and Pipeline
#name = "meta-llama/Llama-3.2-1B"
#name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
#name="deepseek-ai/deepseek-llm-7b-chat"
name="openai-community/gpt2"
#name="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
#name="microsoft/Phi-3.5-mini-instruct"
#name="Qwen/Qwen2.5-7B-Instruct-1M"
tokenizer = AutoTokenizer.from_pretrained(name, truncation=True)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(name)
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device_map="auto",
    max_new_tokens=500,  # maximum number of new tokens to generate per call
)
print ("pipeline is created")
# Wrap in LangChain's HuggingFacePipeline
llm = HuggingFacePipeline(pipeline=pipe)
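# Optional sanity check: HuggingFacePipeline implements the standard LangChain
# Runnable interface, so a bare string prompt should round-trip through the
# model (uncomment to try; output quality depends on the base model chosen above):
# print(llm.invoke("The capital of France is"))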
# Members and Final Options
members = ["Researcher", "Coder"]
options = ["FINISH"] + members
# Supervisor prompt
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the following workers: {members}."
    " Given the following user request, respond with the worker to act next. Each worker will perform a task"
    " and respond with their results and status. When all workers are finished, respond with FINISH."
)
# Prompt template required for the workflow
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        ("system", "Given the conversation above, who should act next? Or should we FINISH? Select one of: {options}"),
    ]
).partial(options=str(options), members=", ".join(members))
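# At this point {options} and {members} are already bound via .partial(), so
# only "messages" remains to be filled at run time. Illustrative check
# (uncomment to inspect the rendered prompt text):
# print(prompt.format(messages=[HumanMessage(content="hi")]))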
print ("Prompt Template created")
# Supervisor routing logic
def route_tool_response(llm_response):
    """
    Parse the LLM response to determine the next step based on routing logic.
    """
    if "FINISH" in llm_response:
        return "FINISH"
    for member in members:
        if member in llm_response:
            return member
    return "Unknown"
def supervisor_chain(state):
    """
    Supervisor logic to interact with the HuggingFacePipeline and decide the next worker.
    """
    messages = state.get("messages", [])
    user_prompt = prompt.format(messages=messages)
    try:
        # return_full_text=False strips the echoed prompt, which itself contains
        # "FINISH" and every member name and would otherwise short-circuit routing.
        llm_response = pipe(user_prompt, max_new_tokens=500, return_full_text=False)[0]["generated_text"]
    except Exception as e:
        raise RuntimeError(f"LLM processing error: {e}") from e
    next_action = route_tool_response(llm_response)
    return {"next": next_action}
# AgentState definition
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]
    next: str
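# The operator.add annotation is the reducer LangGraph applies to this key: each
# node's returned "messages" list is appended to the accumulated state rather
# than replacing it. A one-line illustration of the reducer itself:
assert operator.add([1], [2]) == [1, 2]  # list concatenation, hence "append" semantics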
# Create tools
tavily_tool = TavilySearchResults(max_results=5)
python_repl_tool = PythonREPLTool()
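# Hedged smoke tests: TavilySearchResults needs a TAVILY_API_KEY environment
# variable, and PythonREPLTool executes arbitrary Python locally, so both are
# left commented out rather than run at import time:
# print(tavily_tool.invoke("LangGraph supervisor pattern"))
# print(python_repl_tool.run('print("2 + 2 =", 2 + 2)'))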
# Create agents with their respective prompts
research_agent = create_openai_tools_agent(
    llm=llm,
    tools=[tavily_tool],
    prompt=ChatPromptTemplate.from_messages(
        [
            SystemMessage(content="You are a web researcher."),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),  # placeholder required by create_openai_tools_agent
        ]
    ),
)
print ("Created agents with their respective prompts")
code_agent = create_openai_tools_agent(
    llm=llm,
    tools=[python_repl_tool],
    prompt=ChatPromptTemplate.from_messages(
        [
            SystemMessage(content="You may generate safe Python code for analysis."),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),  # placeholder required by create_openai_tools_agent
        ]
    ),
)
print ("create_openai_tools_agent")
# Create the workflow
workflow = StateGraph(AgentState)
# Nodes
workflow.add_node("Researcher", research_agent) # Pass the agent directly (no .run required)
workflow.add_node("Coder", code_agent) # Pass the agent directly
workflow.add_node("supervisor", supervisor_chain)
# Add edges for workflow transitions
for member in members:
    workflow.add_edge(member, "supervisor")
workflow.add_conditional_edges(
    "supervisor",
    lambda x: x["next"],
    {k: k for k in members} | {"FINISH": END, "Unknown": END},  # map each worker to its node; FINISH (or an unparseable reply) ends the run
)
# Define entry point
workflow.set_entry_point("supervisor")
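# Resulting topology, for orientation:
#   supervisor --"Researcher"--> Researcher --> supervisor
#   supervisor --"Coder"-------> Coder ------> supervisor
#   supervisor --"FINISH"------> END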
print(workflow)
# Compile the workflow
graph = workflow.compile()
from IPython.display import display, Image
display(Image(graph.get_graph().draw_mermaid_png()))
# Properly formatted initial state
initial_state = {
    "messages": [
        # HumanMessage(content="Code hello world and print it to the terminal.")
        HumanMessage(content="Write Code for printing \"hello world\" in Python. Keep it precise.")  # correct format for user input
    ]
}
# Execute the workflow
result = graph.invoke(initial_state)
print("Workflow Result:", result) |