# Customer-support workflow: categorize -> RAG -> sentiment analysis -> routed handler.
# (Removed non-Python web-scrape residue: page navigation text, file size, commit hashes,
# and a flattened line-number gutter that would otherwise break parsing.)
from typing import TypedDict, Dict
from langgraph.graph import StateGraph, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.graph import MermaidDrawMethod
from langchain_openai import ChatOpenAI
import os
from dotenv import load_dotenv
from utils._admin_util import create_rag
class State(TypedDict):
    """Shared workflow state passed between LangGraph nodes.

    Nodes return partial updates ({"category": ...}, {"response": ...});
    LangGraph merges them into this state.
    """

    query: str      # the raw customer question, set once when the graph is invoked
    category: str   # LLM-assigned label (HR / IT / Transportation / Other)
    sentiment: str  # LLM-assigned sentiment label; "Negative" triggers escalation
    response: str   # final answer shown to the user
def check_api_key() -> str:
    """Load environment variables and return the OpenAI API key.

    Fixes: the docstring was originally placed *after* ``load_dotenv()`` (so it
    was a no-op expression statement, not a docstring), and the secret key was
    printed to stdout — a credential leak in logs. The print is removed.

    Returns:
        The value of the ``OPENAI_API_KEY`` environment variable.

    Raises:
        ValueError: if the key is missing or empty.
    """
    load_dotenv()
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OpenAI API key not found in environment variables")
    return api_key
# Validate credentials once at import time so a missing key fails fast.
api_key = check_api_key()
# Single shared chat model reused by every node in the workflow.
llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    openai_api_key=api_key,
    temperature=0.7  # moderately creative support answers
)
def rag(state: State) -> State:
    """Answer the query with the retrieval chain; emits a 'response' update."""
    user_query = state["query"]
    print("query", user_query)
    retrieval_chain = create_rag()
    # The chain takes the bare query string, not a dict payload.
    answer = retrieval_chain.invoke(user_query)
    print("response", answer)
    return {"response": answer}
def categorize(state: State) -> State:
    """Classify the query as HR, IT, Transportation, or Other using the LLM.

    Returns a partial state update: {"category": <label text>}.
    """
    prompt = ChatPromptTemplate.from_template(
        "Categorize the following query into one of these categories: "
        "HR, IT, Transportation, Other. Query: {query}"
    )
    chain = prompt | llm
    # .content extracts the plain text from the chat model's message object.
    category = chain.invoke({"query": state["query"]}).content
    return {"category": category}
def analyze_sentiment(state: State) -> State:
    """Label the query's sentiment; downstream routing escalates on "Negative".

    Prompt fixes: the two template strings concatenated without a sentence
    break ("query" ran into "Response"), the label typo 'Position' is now
    'Positive', and "Response with" is now "Respond with". route_query
    compares the result against "Negative", so clean labels matter.

    Returns a partial state update: {"sentiment": <label text>}.
    """
    prompt = ChatPromptTemplate.from_template(
        "Analyze the sentiment of the following customer query. "
        "Respond with either 'Positive', 'Neutral', or 'Negative'. Query: {query}"
    )
    chain = prompt | llm
    sentiment = chain.invoke({"query": state["query"]}).content
    return {"sentiment": sentiment}
def handle_hr(state: State) -> State:
    """Draft an HR support reply for the current query."""
    hr_prompt = ChatPromptTemplate.from_template(
        "Provide a HR support response to the following query : {query}"
    )
    answer = (hr_prompt | llm).invoke({"query": state["query"]}).content
    return {"response": answer}
def handle_it(state: State) -> State:
    """Draft an IT support reply for the current query."""
    it_prompt = ChatPromptTemplate.from_template(
        "Provide a IT support response to the following query : {query}"
    )
    answer = (it_prompt | llm).invoke({"query": state["query"]}).content
    return {"response": answer}
def handle_transportation(state: State) -> State:
    """Draft a transportation support reply for the current query."""
    transport_prompt = ChatPromptTemplate.from_template(
        "Provide a transportation support response to the following query : {query}"
    )
    answer = (transport_prompt | llm).invoke({"query": state["query"]}).content
    return {"response": answer}
def handle_general(state: State) -> State:
    """Draft a general support reply for queries outside HR/IT/Transportation."""
    general_prompt = ChatPromptTemplate.from_template(
        "Provide a general support response to the following query : {query}"
    )
    answer = (general_prompt | llm).invoke({"query": state["query"]}).content
    return {"response": answer}
def escalate(state: "State") -> "State":
    """Terminal node: hand the conversation off to a human agent.

    Fixes the grammar of the user-facing message ("has been escalate" ->
    "has been escalated"). The annotations are quoted (lazy forward refs)
    so the function is importable standalone.

    Returns a partial state update: {"response": <canned escalation message>}.
    """
    return {
        "response": (
            "This query has been escalated to a human agent "
            "due to its negative sentiment"
        )
    }
def route_query(state: "State") -> str:
    """Pick the next node after sentiment analysis.

    Fix: the return annotation claimed ``State`` but the function returns a
    node-name string consumed by ``add_conditional_edges``. Negative
    sentiment always wins over category. Annotation on ``state`` is quoted
    (lazy forward ref) so the function is importable standalone.

    Returns:
        One of "escalate", "handle_hr", "handle_it",
        "handle_transportation", or "handle_general".
    """
    if state["sentiment"] == "Negative":
        return "escalate"

    # Category -> handler node; anything unrecognized goes to the generalist.
    handlers = {
        "HR": "handle_hr",
        "IT": "handle_it",
        "Transportation": "handle_transportation",
    }
    return handlers.get(state["category"], "handle_general")
def rout_to_agent(state: "State") -> str:
    """Decide whether the RAG answer suffices or an agent path is needed.

    Fixes: the return annotation claimed ``State`` but the function returns a
    routing key; leftover debug prints removed. (The misspelled name "rout"
    is kept because ``run_customer_support`` references it.) Annotation on
    ``state`` is quoted (lazy forward ref) so the function is importable
    standalone.

    Returns:
        "analyze_sentiment" when the RAG chain could not answer
        (its reply contains "i don't know"), otherwise "END".
    """
    if "i don't know" in state["response"].lower():
        return "analyze_sentiment"
    return "END"
def run_customer_support(query: str) -> Dict[str, str]:
    """Run one customer query through the support workflow.

    Pipeline: categorize -> rag -> (END if RAG answered) -> analyze_sentiment
    -> one of the handlers / escalate -> END.

    Bug fix: ``route_query`` can return "handle_general" (for the "Other"
    category), but the node was never registered and the conditional-edge
    mapping lacked that key — any non-negative "Other" query that RAG could
    not answer crashed the graph. The node, its mapping entry, and its END
    edge are now wired in.

    Args:
        query: the raw customer question.

    Returns:
        Dict with "category", "sentiment" (empty strings when the graph
        ended at the RAG node and never set them), and "response".
    """
    workflow = StateGraph(State)

    workflow.add_node("categorize", categorize)
    workflow.add_node("rag", rag)
    workflow.add_node("analyze_sentiment", analyze_sentiment)
    workflow.add_node("handle_hr", handle_hr)
    workflow.add_node("handle_it", handle_it)
    workflow.add_node("handle_transportation", handle_transportation)
    workflow.add_node("handle_general", handle_general)  # was missing — see docstring
    workflow.add_node("escalate", escalate)

    workflow.add_edge("categorize", "rag")
    # If RAG answered, stop; otherwise fall through to the agent pipeline.
    workflow.add_conditional_edges(
        "rag",
        rout_to_agent,
        {"analyze_sentiment": "analyze_sentiment", "END": END},
    )
    workflow.add_conditional_edges(
        "analyze_sentiment",
        route_query,
        {
            "handle_hr": "handle_hr",
            "handle_it": "handle_it",
            "handle_transportation": "handle_transportation",
            "handle_general": "handle_general",  # was missing — see docstring
            "escalate": "escalate",
        },
    )
    for terminal in (
        "handle_hr",
        "handle_it",
        "handle_transportation",
        "handle_general",
        "escalate",
    ):
        workflow.add_edge(terminal, END)

    workflow.set_entry_point("categorize")
    app = workflow.compile()

    results = app.invoke({"query": query})
    return {
        "category": results.get("category", ""),   # empty if graph ended at RAG
        "sentiment": results.get("sentiment", ""),  # empty if graph ended at RAG
        "response": results["response"],
    }