balatechner committed on
Commit
0bc2945
·
verified ·
1 Parent(s): 9b18554

Added necessary files

Browse files
Files changed (9) hide show
  1. .gitignore +50 -0
  2. app.py +102 -0
  3. env_template.txt +2 -0
  4. existing_solution.py +210 -0
  5. models.py +32 -0
  6. prompts.py +106 -0
  7. requirements.txt +7 -0
  8. user_data.json +71 -0
  9. workflow.py +37 -0
.gitignore ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # These are some examples of commonly ignored file patterns.
2
+ # You should customize this list as applicable to your project.
3
+ # Learn more about .gitignore:
4
+ # https://www.atlassian.com/git/tutorials/saving-changes/gitignore
5
+
6
+ # Node artifact files
7
+ node_modules/
8
+ dist/
9
+
10
+ # Compiled Java class files
11
+ *.class
12
+
13
+ # Compiled Python bytecode
14
+ *.py[cod]
15
+
16
+ # Log files
17
+ *.log
18
+
19
+ # Package files
20
+ *.jar
21
+
22
+ # Maven
23
+ target/
24
+ dist/
25
+
26
+ # JetBrains IDE
27
+ .idea/
28
+
29
+ # Unit test reports
30
+ TEST*.xml
31
+
32
+ # Generated by MacOS
33
+ .DS_Store
34
+
35
+ # Generated by Windows
36
+ Thumbs.db
37
+
38
+ # Applications
39
+ *.app
40
+ *.exe
41
+ *.war
42
+
43
+ # Large media files
44
+ *.mp4
45
+ *.tiff
46
+ *.avi
47
+ *.flv
48
+ *.mov
49
+ *.wmv
50
+ .env
app.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import ast
import json
import os

import gradio as gr
import requests
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser

from models import *
from workflow import app_graph
from existing_solution import *
10
+
11
class RouterResponse_1(BaseModel):
    """Structured output of the solution-1 router LLM call."""
    # Keys of the user-data JSON judged relevant to the query.
    route :list[str]= Field(description=("A list of keys relevant to the user's query"))
13
+
14
class SummaryResponse_1(BaseModel):
    """Structured output of the solution-1 summariser LLM call."""
    information: str=Field(description=("Condensed information based on the context provided"))
16
+
17
# Parsers that provide format instructions to the prompts and parse the
# raw LLM replies back into the pydantic models above.
route_op_1=PydanticOutputParser(pydantic_object=RouterResponse_1)
summary_op_1=PydanticOutputParser(pydantic_object=SummaryResponse_1)
19
+
20
async def solution_langchain(query, prev_msgs, json_data):
    """Solution 1: route the query to relevant user-data keys, then summarise.

    Args:
        query: The user's question.
        prev_msgs: Prior conversation messages used as routing context.
        json_data: Full user-data dict; only routed keys are summarised.

    Returns:
        The condensed query-relevant information, or "Nothing" when the
        router selects no usable keys.
    """
    response = await router_chain_1.ainvoke({
        "query": query,
        "previous_messages": prev_msgs,
        "format_instructions": route_op_1.get_format_instructions(),
    })
    routes = route_op_1.parse(response.content).route
    print(routes)
    # Bug fix: the router may emit keys absent from the data (its prompt even
    # tells it to "route to None"); filter them instead of raising KeyError.
    result = {key: json_data[key] for key in routes if key in json_data}
    if not result:
        return "Nothing"
    print(result)
    response = await summary_chain_1.ainvoke({
        "query": query,
        "data": json.dumps(result),
        "previous_messages": prev_msgs,
        "format_instructions": summary_op_1.get_format_instructions(),
    })
    return summary_op_1.parse(response.content).information
32
+
33
async def process_inputs(input_string, uploaded_file):
    """Run the query through all three pipelines and return their responses.

    Args:
        input_string: The user's query.
        uploaded_file: Filepath of the uploaded user-data JSON file.

    Returns:
        Tuple of three response strings: (LangChain solution, LangGraph
        solution, existing FastAPI solution).

    Raises:
        Exception: If no user-data file was uploaded or it is not valid JSON.
    """
    if uploaded_file is None:
        raise Exception("User data Needed")
    try:
        with open(uploaded_file) as f:
            file_content = json.load(f)
    except Exception as e:
        # Bug fix: this exception was previously swallowed with print(e),
        # leaving file_content unbound and causing a NameError below.
        raise Exception(f"Could not read user data JSON: {e}") from e

    input_list = []
    inputs = {
        "query": input_string,
        "previous_msgs": input_list,
        "ui_data": file_content,
        "information": [],
    }

    extracted_1 = await solution_langchain(query=input_string, prev_msgs=input_list, json_data=file_content)
    final_state = await app_graph.ainvoke(inputs)
    extracted_2 = final_state['information']

    url = os.getenv("PERSONALITY_URL") + "/chat"

    def _ask_personality(extracted):
        """Render one extraction through the personality chat endpoint."""
        # NOTE(review): requests.post blocks the event loop inside this async
        # function; acceptable for a demo UI, but httpx would be cleaner.
        payload = {
            "message": RESPONSE_PROMPT.format(query=input_string, user_information=extracted),
            "personality": 'humanish',
        }
        resp = requests.post(url, json=payload)
        resp.raise_for_status()
        return resp.json()["response"]

    answer_1 = _ask_personality(extracted_1)
    answer_2 = _ask_personality(extracted_2)

    # Third pipeline: call the existing FastAPI handler directly (no HTTP).
    messages = [
        ChatMessage(role="user", content=input_string),]
    request = ChatRequest(
        messages=messages,
        user_preferences=file_content,
        personality="humanish"
    )
    response_3 = await chat_endpoint(request)

    return answer_1, answer_2, response_3.response
85
+
86
+
87
# Gradio UI: one query box plus one JSON upload, with three side-by-side
# outputs so the three pipelines can be compared on the same input.
interface = gr.Interface(
    fn=process_inputs,
    inputs=[
        gr.Textbox(label="Enter a string"),
        gr.File(label="Upload a JSON file", type="filepath")
    ],
    outputs=[
        gr.Textbox(label="Solution 1 Langchain"),
        gr.Textbox(label="Solution 2 Langgraph"),
        gr.Textbox(label="Existing Solution"),
    ],
    title="Extracting Relevant UI",
    description="Provide a query, previous messages and user_data. Make sure in user data these keys are present :['name', 'age', 'gender', 'preferences', 'personalInformation', 'relatedDemographics', 'history', 'painPoints', 'inefficienciesOrQualityOfLifeImprovements', 'additionalNotes']"
)

interface.launch()
env_template.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ OPEN_ROUTER_API_KEY=""
2
+ PERSONALITY_URL=""
existing_solution.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ import httpx
4
+ import requests
5
+ import os
6
+ from dotenv import load_dotenv
7
+ from typing import List, Optional, Dict, Any
8
+ import json
9
+
10
+
11
# Load configuration from a local .env file (see env_template.txt).
load_dotenv()


# OpenRouter credentials and the optional external personality-service URL.
OPENROUTER_API_KEY = os.getenv("OPEN_ROUTER_API_KEY")
PERSONALITY_URL = os.getenv("PERSONALITY_URL")
app = FastAPI(title="ER5 v1.0.0")
17
+
18
class ChatMessage(BaseModel):
    """A single chat turn."""
    role: str  # "user", "assistant" or "system"
    content: str
21
+
22
+
23
class ChatRequest(BaseModel):
    """Incoming payload for the chat flow."""
    messages: List[ChatMessage]=[]
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = 1000
    # Schemaless user profile, injected as a system message when relevant.
    user_preferences: Optional[Dict[str, Any]] = None
    # JSON-encoded prior message list (parsed by parse_context).
    context: Optional[str] = ""
    personality: Optional[str] = None
30
+
31
class ChatResponse(BaseModel):
    """Assistant reply plus the updated JSON-encoded context to carry forward."""
    response: str
    context: Optional[str] = None
34
+
35
def build_system_message(preferences: Dict[str, Any]) -> dict:
    """Build the system-role message that injects schemaless user preferences.

    Falls back to a plain helpful-assistant persona when no preferences are
    supplied. List/dict values are rendered as JSON, scalars verbatim.
    """
    if not preferences:
        return {"role": "system", "content": "You are a helpful assistant."}

    lines = []
    for key, value in preferences.items():
        rendered = json.dumps(value) if isinstance(value, (list, dict)) else value
        lines.append(f"- {key}: {rendered}")
    preferences_str = "\n".join(lines)

    system_content = f"""You are a helpful assistant. Please consider these user preferences in your responses:

{preferences_str}

Tailor your responses to align with these user preferences and characteristics."""

    return {"role": "system", "content": system_content}
52
+
53
def parse_context(context_str: str) -> List[dict]:
    """Deserialize a JSON-encoded message history, tolerating bad input.

    Returns an empty list for falsy or unparsable input.
    """
    if context_str:
        try:
            return json.loads(context_str)
        except json.JSONDecodeError:
            pass
    return []
61
+
62
+
63
+
64
async def call_openrouter(messages: List[dict], temperature: float, max_tokens: int):
    """POST a chat-completion request to OpenRouter and return the reply text.

    Args:
        messages: OpenAI-style message dicts ({"role", "content"}).
        temperature: Sampling temperature forwarded to the model.
        max_tokens: Completion-length cap forwarded to the model.

    Raises:
        HTTPException: Propagates any non-200 OpenRouter status and body.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"

    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "qwen/qwen-2.5-7b-instruct",
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens
    }

    async with httpx.AsyncClient() as client:
        response = await client.post(url, json=payload, headers=headers)

    if response.status_code != 200:
        raise HTTPException(status_code=response.status_code,
                            detail=f"OpenRouter API error: {response.text}")

    # OpenRouter follows the OpenAI response schema.
    return response.json()['choices'][0]['message']['content']
87
# @app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    """Main chat flow.

    Optionally injects user preferences as a system message (decided per
    query by an LLM classifier), replays carried-over context, dispatches
    to the personality service or OpenRouter, and returns the reply plus
    the updated context.

    Raises:
        HTTPException: 500 wrapping any failure in the flow.
    """
    try:
        # Ask the LLM whether the latest user message needs personal context.
        include_preferences = await should_include_preferences_llm(request.messages[-1].content)

        messages = []

        if include_preferences and request.user_preferences:
            messages.append(build_system_message(request.user_preferences))

        # Replay any prior context the client carried over.
        context_messages = parse_context(request.context)
        messages.extend(context_messages)

        messages.extend([{"role": msg.role, "content": msg.content}
                         for msg in request.messages])

        # Keep only the 10 most recent messages.
        messages = messages[-10:]

        print(messages)

        response = await decide_chat_api(
            messages=messages,
            temperature=request.temperature,
            max_tokens=request.max_tokens,
            personality=request.personality
        )

        assistant_message = response

        # Drop the system message (when present) before persisting context.
        context_messages = messages[1:] if include_preferences else messages
        context_messages.append({"role": "assistant", "content": assistant_message})
        updated_context = json.dumps(context_messages[-10:])

        return ChatResponse(response=assistant_message, context=updated_context)

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
131
+
132
async def should_include_preferences_llm(query: str) -> bool:
    """
    Use the LLM to decide if user preferences should be included based on the query.

    Args:
        query: The latest user message.

    Returns:
        True when the classifier says personal context is relevant.

    Raises:
        HTTPException: On any non-200 OpenRouter status.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json"
    }

    prompt = (
        "You are an assistant that classifies user queries. "
        "Given the query below, decide if user-specific context or preferences are relevant. "
        "Respond with 'true' if any kind of personal context or preferences should be included in the response, "
        "and 'false' otherwise.\n\n"
        f"Query: {query}\n\n"
        "Is this query related to personal context such as user preferences, lifestyle, habits, or any other factors "
        "that could affect the response?"
    )

    payload = {
        "model": "qwen/qwen-2.5-7b-instruct",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.0,
        "max_tokens": 5
    }

    async with httpx.AsyncClient() as client:
        response = await client.post(url, json=payload, headers=headers)

    if response.status_code != 200:
        raise HTTPException(status_code=response.status_code,
                            detail=f"OpenRouter API error: {response.text}")

    llm_response = response.json()['choices'][0]['message']['content'].strip().lower()
    # Robustness fix: models often answer "true." or "True," within the
    # 5-token budget; strict equality then silently dropped preferences.
    return llm_response.startswith("true")
170
+
171
async def call_personality_api(messages: List[dict], personality: str):
    """
    Call the external Personality API to handle chat completions.

    Args:
        messages: Chat history; contents are flattened into one message string.
        personality: Personality profile name forwarded to the service.

    Raises:
        HTTPException: If the service answers with a non-200 status or the
            reply is missing the "response" field.
    """
    # Fix: use the configured service URL (decide_chat_api only routes here
    # when PERSONALITY_URL is set) instead of a hard-coded runpod instance
    # that also produced a double slash ("...net//chat").
    url = PERSONALITY_URL.rstrip("/") + "/chat"
    message_str = "\n".join([msg['content'] for msg in messages]) if messages else ""

    # Bug fix: the payload was previously rebuilt a second time with a
    # hard-coded 'humanish' personality, silently discarding the caller's
    # `personality` argument.
    payload = {
        "message": message_str,
        "personality": personality
    }

    # NOTE(review): requests.post blocks the event loop inside this async
    # function; consider httpx.AsyncClient like call_openrouter.
    response = requests.post(url, json=payload)
    if response.status_code != 200:
        raise HTTPException(status_code=response.status_code,
                            detail=f"Personality API error: {response.text}")

    response_json = response.json()
    if "response" not in response_json:
        # Previously fell through and returned None, which chat_endpoint
        # then serialized as the assistant reply.
        raise HTTPException(status_code=502,
                            detail="Personality API reply missing 'response' field")
    return response_json["response"]
195
+
196
+
197
async def decide_chat_api(messages: List[dict], temperature: float, max_tokens: int, personality: str):
    """Dispatch the chat to the personality service or to OpenRouter.

    The personality service is used only when PERSONALITY_URL is configured
    (non-empty after stripping) and a personality was requested; otherwise
    the request falls back to OpenRouter.
    """
    use_personality = bool(PERSONALITY_URL and PERSONALITY_URL.strip() and personality)
    if not use_personality:
        return await call_openrouter(messages, temperature, max_tokens)
    print("yes")
    return await call_personality_api(messages, personality)
207
+
208
+ # if __name__ == "__main__":
209
+ # import uvicorn
210
+ # uvicorn.run(app, host="0.0.0.0", port=8000)
models.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pydantic import BaseModel, Field
3
+
4
+ from dotenv import load_dotenv
5
+ from prompts import *
6
+
7
+ from langchain_openai import ChatOpenAI
8
+ from langchain_core.prompts import ChatPromptTemplate
9
+
10
# Load OPEN_ROUTER_API_KEY (and friends) from the local .env file.
load_dotenv()

class RouterResponse_2(BaseModel):
    """Structured output for the LangGraph router runnable."""
    route :list[str]= Field(description=("A list of keys relevant to the user's query"))
class ExtractorResponse_2(BaseModel):
    """Structured output for the LangGraph extractor runnable."""
    information: str=Field(description=("Condensed information based on the context provided"))
16
+
17
# Shared chat model; OpenRouter exposes an OpenAI-compatible API.
llm = ChatOpenAI(model="openai/gpt-4o-mini",temperature=0.7,base_url="https://openrouter.ai/api/v1",
                 api_key=os.getenv("OPEN_ROUTER_API_KEY"))

# Solution-1 chains. The .format() calls here only re-insert the literal
# placeholder names, so the ChatPromptTemplate fills {query},
# {previous_messages}, {data} and {format_instructions} at invoke time.
router_prompt_1 = ChatPromptTemplate.from_messages([
    ("system", "You are a routing assistant."),
    ("user", router_instruction_prompt_1.format(query="{query}", previous_messages="{previous_messages}",
                                                format_instructions="{format_instructions}"))])
router_chain_1= router_prompt_1 | llm
summary_prompt_1 = ChatPromptTemplate.from_messages([
    ("system", "You are a Summarising assistant."),
    ("user", summary_prompt_instructions_1.format(query="{query}", previous_messages="{previous_messages}",
                                                  data="{data}",format_instructions="{format_instructions}"))])
summary_chain_1 = summary_prompt_1 | llm

# Solution-2 (LangGraph) runnables with pydantic-structured outputs.
router = router_instruction_prompt_2 | llm.with_structured_output(RouterResponse_2)
extractor = extract_prompt_instructions_2 | llm.with_structured_output(ExtractorResponse_2)
prompts.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import PromptTemplate
2
+
3
# Prompt templates for both solutions. The *_1 templates are plain strings
# whose placeholders are re-escaped in models.py before wrapping in a
# ChatPromptTemplate; the *_2 templates are PromptTemplates consumed
# directly by the LangGraph runnables.

router_instruction_prompt_1="""
You are tasked with analyzing user input to identify the most relevant keys from a dataset of user information.
Each key provides specific details about the user.
Your goal is to understand the query and conversation context to select the appropriate keys from which the user's data can help in providing accurate, personalised and actionable solutions.

1. name: The user's full name.
2. age: The user's age.
3. gender: The user's gender.
4. preferences: These are user's preferences.
5. personalInformation: These are user's personal information details.
6. relatedDemographics: Information pertaining to the user's geographic location and other demographic factors.
7. history: A record of the user's recent activities, including search history, recent purchases, places visited, and other relevant actions or events.
8. painPoints: The user's complaints, challenges, or issues they face.
9. inefficienciesOrQualityOfLifeImprovements: Areas where the user seeks improvement, along with potential solutions to increase efficiency or enhance their quality of life.
10. additionalNotes: Any extra information that might be relevant or helpful about the user.

Instructions:
- Analyze the user’s query and previous message to understand its purpose, context, and specific needs.
- Determine which keys (one or multiple) are most relevant to extract the required information to provide a comprehensive and actionable response.
- If no additional information is needed or no relevant details are found, route to None.
- Pay special attention to personalInformation if the user's financial situation or budget is relevant to the decision-making process.

Query:
{query}

Previous Messages:
{previous_messages}

Format Instructions:
{format_instructions}
"""

summary_prompt_instructions_1="""
Given the following JSON data,query and previous messages condense all the information into a summary that captures all the details that may be relevant to providing solutions for the user query.
Include in the summary only those points that are relevant to the query or conversation context. If nothing was found, return ""
Query:
{query}

Previous Messages:
{previous_messages}

Json Data:
{data}

Format Instructions:
{format_instructions}
"""

router_instruction_prompt_2=PromptTemplate.from_template("""
You are tasked with analyzing user input to identify the most relevant keys from a dataset of user information.
Each key provides specific details about the user.
Your goal is to understand the query and conversation context to select the appropriate keys from which the user's data can help in providing accurate, personalised and actionable solutions.

1. name: The user's full name.
2. age: The user's age.
3. gender: The user's gender.
4. preferences: These are user's preferences.
5. personalInformation: These are user's personal information details.
6. relatedDemographics: Information pertaining to the user's geographic location and other demographic factors.
7. history: A record of the user's recent activities, including search history, recent purchases, places visited, and other relevant actions or events.
8. painPoints: The user's complaints, challenges, or issues they face.
9. inefficienciesOrQualityOfLifeImprovements: Areas where the user seeks improvement, along with potential solutions to increase efficiency or enhance their quality of life.
10. additionalNotes: Any extra information that might be relevant or helpful about the user.

Instructions:
- Analyze the user’s query and previous message to understand its purpose, context, and specific needs.
- Determine which keys (one or multiple) are most relevant to extract the required information to provide a comprehensive, personalised and actionable response.
- If no additional information is needed or no relevant details are found, route to None.
- Pay special attention to personalInformation if the user's financial situation or budget is relevant to the decision-making process.

Query:
{query}

Previous Messages:
{previous_messages}
""")

extract_prompt_instructions_2=PromptTemplate.from_template("""
Given the following JSON data,query and previous messages extract the information that may be relevant to providing solutions for the user query.

Instructions:
- Include in the summary only those points that are relevant to the query or conversation context.
- If nothing relevant was found, return "".
- Do not provide anything in your own words.

Query:
{query}

Previous Messages:
{previous_messages}

Json Data:
{data}

""")

RESPONSE_PROMPT="""You are helpful assistant, Based on the User information and the query provide a response accordingly.
Do not Fabricate or provide false details
User information:
{user_information}

Query:
{query}
"""
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
requests
langchain
langchain_openai
langgraph
gradio
pydantic
python-dotenv
fastapi
httpx
user_data.json ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Sarah Thompson",
3
+ "age": 34,
4
+ "gender": "Female",
5
+ "preferences": {
6
+ "hobbies": ["Photography", "Hiking", "Yoga", "Cooking Italian cuisine", "Attending local art exhibitions"],
7
+ "interests": ["Environmental sustainability", "Technology trends", "Travel blogging", "Contemporary fiction literature"],
8
+ "favoriteColor": "Teal"
9
+ },
10
+ "personalInformation": {
11
+ "approximateIncomeLevel": 85000,
12
+ "spendingPower": "Moderate to high",
13
+ "location": "Seattle, Washington",
14
+ "personality": ["Extroverted", "Organized", "Environmentally conscious", "Tech-savvy"],
15
+ "relationshipStatus": "Single",
16
+ "job": "Professional Photographer",
17
+ "educationLevel": "Master's degree in Rocket Science"
18
+ },
19
+ "relatedDemographics": {
20
+ "groups": ["Member of a local photography club", "Active participant in a yoga studio community"],
21
+ "relatedInterests": [
22
+ "Individuals interested in photography often engage with graphic design and visual arts communities",
23
+ "Environmental enthusiasts frequently explore topics like sustainable living and eco-friendly products"
24
+ ]
25
+ },
26
+ "history": {
27
+ "recentSearches": [
28
+ "Best mirrorless cameras 2023",
29
+ "Yoga retreats in Costa Rica",
30
+ "Electric cars vs. hybrids",
31
+ "Sustainable fashion brands",
32
+ "Samsung Galaxy S35 Ultra pro max",
33
+ "Iphone 19 with telescopic camera"
34
+ ],
35
+ "pastPurchases": [
36
+ "Organic skincare products",
37
+ "Tickets to local music festivals"
38
+ ],
39
+ "restaurantsVisited": ["Vegan bistros", "Farm-to-table restaurants", "Authentic Italian eateries"],
40
+ "storesFrequented": ["Whole Foods Market", "REI", "Apple Store", "Local independent bookstores"],
41
+ "websitesAppsUsedFrequently": ["Instagram", "Pinterest", "TripAdvisor", "Amazon", "Headspace app"]
42
+ },
43
+ "painPoints": {
44
+ "complaints": [
45
+ "Difficulty finding stylish yet sustainable clothing options",
46
+ "Frustration with traffic congestion during daily commute",
47
+ "Challenges in balancing work commitments with personal life and hobbies"
48
+ ],
49
+ "problems": [
50
+ "Concerned about environmental impact but finds eco-friendly products often priced at a premium",
51
+ "Aspires to advance her career but lacks time to pursue additional certifications or education"
52
+ ]
53
+ },
54
+ "inefficienciesOrQualityOfLifeImprovements": {
55
+ "areasForImprovement": [
56
+ "Seeking affordable, sustainable products without compromising on style or quality",
57
+ "Looking for more efficient commuting options to reduce time spent in traffic",
58
+ "Interested in apps or services that aid in time management and enhance productivity"
59
+ ],
60
+ "potentialSolutions": [
61
+ "Subscription services offering sustainable fashion at discounted rates",
62
+ "Carpooling apps or electric scooter services for a quicker commute",
63
+ "Productivity tools that integrate work schedules with personal goals"
64
+ ]
65
+ },
66
+ "additionalNotes": {
67
+ "techEngagement": "Being tech-savvy, Sarah is an early adopter of new technologies and is likely to be interested in beta testing new apps or gadgets",
68
+ "environmentalAdvocacy": "Actively participates in community clean-up events and promotes sustainable practices within her workplace",
69
+ "socialMediaInfluence": "Has a modest following on Instagram where she shares photography and sustainability tips, making her a micro-influencer in those niches"
70
+ }
71
+ }
workflow.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from typing_extensions import TypedDict
3
+ from models import extractor, router
4
+ from langgraph.graph import StateGraph, START,END
5
+
6
class PlanExecute(TypedDict):
    """Shared LangGraph state threaded through the route -> extract loop."""
    ui_data:dict        # full user-data JSON supplied by the caller
    query:str           # the user's question
    previous_msgs:list  # prior conversation turns (routing context)
    list_keys: list     # ui_data keys still pending extraction
    information:list    # accumulated non-empty extractions
12
+
13
async def route_step(state: PlanExecute):
    """Ask the router which user-data keys are relevant and queue them."""
    payload = {
        "query": state["query"],
        "previous_messages": state["previous_msgs"],
    }
    routed = await router.ainvoke(payload)
    return {"list_keys": routed.route}
16
+
17
async def extract_step(state: PlanExecute):
    """Extract query-relevant info for the next pending key.

    Processes the first key in state['list_keys'], appends any non-empty
    extraction to the information channel, and returns the updated channels.
    """
    key = state['list_keys'][0]
    response = await extractor.ainvoke({
        "query": state["query"],
        "data": json.dumps(state['ui_data'][key]),
        "previous_messages": state["previous_msgs"],
    })
    # Fix: return explicit channel updates instead of mutating shared state
    # in place and returning None — the documented LangGraph node contract.
    remaining = state['list_keys'][1:]
    information = list(state['information'])
    if response.information != '':
        information.append(response.information)
    print(information)
    return {"list_keys": remaining, "information": information}
24
+
25
def should_end(state: PlanExecute):
    """Route to END once every selected key has been processed."""
    return END if not state["list_keys"] else "extract_step"
30
+
31
# Wire the graph: route once, then loop extract_step (one key per pass)
# until should_end reports the key queue is empty.
workflow = StateGraph(PlanExecute)
workflow.add_node("route_step", route_step)
workflow.add_node("extract_step", extract_step)
workflow.add_edge(START, "route_step")
workflow.add_conditional_edges("route_step",should_end,[END, "extract_step"])
workflow.add_conditional_edges("extract_step",should_end,[END, "extract_step"])
app_graph = workflow.compile()