Adriana213 committed on
Commit
be06206
·
verified ·
1 Parent(s): 82a9095
Files changed (1) hide show
  1. conv_career_tools_adriana.py +347 -347
conv_career_tools_adriana.py CHANGED
@@ -1,347 +1,347 @@
1
- #!/usr/bin/env python
2
- # coding: utf-8
3
-
4
- # # Career Conversation Project
5
-
6
- # In[41]:
7
-
8
-
9
- from dotenv import load_dotenv
10
- from openai import OpenAI
11
- import json
12
- import os
13
- import requests
14
- from pypdf import PdfReader
15
- import gradio as gr
16
-
17
-
18
- # In[42]:
19
-
20
-
21
- load_dotenv(override=True)
22
- openai = OpenAI()
23
- gemini = OpenAI(
24
- api_key = os.getenv('GOOGLE_API_KEY'),
25
- base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
26
- )
27
-
28
-
29
- # In[43]:
30
-
31
-
32
- pushover_user = os.getenv("PUSHOVER_USER")
33
- pushover_token = os.getenv("PUSHOVER_TOKEN")
34
- pushover_url = "https://api.pushover.net/1/messages.json"
35
-
36
-
37
- # In[44]:
38
-
39
-
40
- reader = PdfReader("../me/linkedin.pdf")
41
- linkedin = ""
42
- for page in reader.pages:
43
- text = page.extract_text()
44
- if text:
45
- linkedin += text
46
-
47
- with open("../me/summary.txt", "r", encoding="utf-8") as f:
48
- summary = f.read()
49
-
50
- name = "Adriana Salcedo"
51
-
52
-
53
- # In[45]:
54
-
55
-
56
- def push(message):
57
- print(f"Push: {message}")
58
- payload = {"user": pushover_user, "token": pushover_token, "message": message}
59
- requests.post(pushover_url, data=payload)
60
-
61
-
62
- # ## Tools
63
-
64
- # In[46]:
65
-
66
-
67
- def record_user_details(email, name="Name not provided", notes="not provided"):
68
- push(f"Recording interest from {name} with email {email} and notes {notes}")
69
- return {"recorded": "ok"}
70
-
71
-
72
- def record_unknown_question(question):
73
- push(f"Recording {question} asked that I couldn't answer")
74
- return {"recorded": "ok"}
75
-
76
- def record_personal_question(question, acceptable):
77
- if acceptable:
78
- push(f'A personal question was asked and answered:\n {question}')
79
- else:
80
- push(f'A personal question was asked and not answered:\n {question}')
81
- return {"recorded": "ok"}
82
-
83
-
84
- def record_skill_question(question):
85
- push(f'A skill-related question was asked:\n {question}')
86
- return {'recorded': 'ok'}
87
-
88
-
89
- # In[47]:
90
-
91
-
92
- record_user_details_json = {
93
- "name": "record_user_details",
94
- "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
95
- "parameters": {
96
- "type": "object",
97
- "properties": {
98
- "email": {
99
- "type": "string",
100
- "description": "The email address of this user"
101
- },
102
- "name": {
103
- "type": "string",
104
- "description": "The user's name, if they provided it"
105
- }
106
- ,
107
- "notes": {
108
- "type": "string",
109
- "description": "Any additional information about the conversation that's worth recording to give context"
110
- }
111
- },
112
- "required": ["email"],
113
- "additionalProperties": False
114
- }
115
- }
116
-
117
-
118
- # In[48]:
119
-
120
-
121
- record_unknown_question_json = {
122
- "name": "record_unknown_question",
123
- "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
124
- "parameters": {
125
- "type": "object",
126
- "properties": {
127
- "question": {
128
- "type": "string",
129
- "description": "The question that couldn't be answered"
130
- },
131
- },
132
- "required": ["question"],
133
- "additionalProperties": False
134
- }
135
- }
136
-
137
-
138
- # In[49]:
139
-
140
-
141
- record_personal_question_json = {
142
- 'name': 'record_personal_question',
143
- 'description': 'Use this tool to log if a personal question was asked. Indicate if the question is acceptable (can be answered) or not.',
144
- 'parameters': {
145
- 'type': 'object',
146
- 'properties': {
147
- 'question': {
148
- 'type': 'string',
149
- 'description': 'Question that will not be answered'
150
- },
151
- 'acceptable': {
152
- 'type': 'boolean',
153
- 'description': 'Indicates if a question is acceptable or not'
154
- }
155
- },
156
- 'required': ['question', 'acceptable'],
157
- 'additionalProperties': False
158
- }
159
- }
160
-
161
-
162
- # In[50]:
163
-
164
-
165
- record_skill_question_json = {
166
- 'name': 'record_skill_question',
167
- 'description': (
168
- "Whenever a user asks about any skill, technology, tool, programming language, or experience"
169
- "regardless of whether it is present in the profile or not. ALWAYS use this tool to notify the owner. "
170
- "Pass the original user question as the argument. "
171
- "Examples: 'Do you know Python?', 'Have you worked with Tableau?', 'Are you familiar with cloud computing?'"
172
- ),
173
- 'parameters': {
174
- 'type': 'object',
175
- 'properties': {
176
- 'question': {
177
- 'type': 'string',
178
- 'description': 'Skill-related question was asked'
179
- },
180
- },
181
- 'required': ['question'],
182
- 'additionalProperties': False
183
- }
184
- }
185
-
186
-
187
- # In[51]:
188
-
189
-
190
- tools = [{"type": "function", "function": record_user_details_json},
191
- {"type": "function", "function": record_unknown_question_json},
192
- {'type': 'function', 'function': record_personal_question_json},
193
- {'type': 'function', 'function': record_skill_question_json}
194
-
195
- ]
196
-
197
-
198
- # In[52]:
199
-
200
-
201
- def handle_tool_calls(tool_calls):
202
- results = []
203
- for tool_call in tool_calls:
204
- tool_name = tool_call.function.name
205
- arguments = json.loads(tool_call.function.arguments)
206
- print(f"Tool called: {tool_name}", flush=True)
207
- tool = globals().get(tool_name)
208
- result = tool(**arguments) if tool else {}
209
- results.append({"role": "tool","content": json.dumps(result),"tool_call_id": tool_call.id})
210
- return results
211
-
212
-
213
- # In[53]:
214
-
215
-
216
- system_prompt = f"You are acting as {name}. You are answering questions on {name}'s website, \
217
- particularly questions related to {name}'s career, background, skills and experience. \
218
- Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \
219
- You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. \
220
- Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
221
- If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \
222
- If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "
223
-
224
- system_prompt += f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
225
- system_prompt += f"With this context, please chat with the user, always staying in character as {name}."
226
-
227
-
228
- # ## Implement Evaluator
229
-
230
- # In[54]:
231
-
232
-
233
- from pydantic import BaseModel
234
-
235
- class Evaluation(BaseModel):
236
- is_acceptable: bool
237
- feedback: str
238
-
239
-
240
- # In[55]:
241
-
242
-
243
- evaluator_system_prompt = f"You are an evaluator that decides whether a response to a question is acceptable. \
244
- You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. \
245
- The Agent is playing the role of {name} and is representing {name} on their website. \
246
- The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \
247
- The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:"
248
-
249
- evaluator_system_prompt += f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
250
- evaluator_system_prompt += f"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback if necessary."
251
-
252
-
253
- # In[56]:
254
-
255
-
256
- def evaluator_user_prompt(reply, message, history):
257
- user_prompt = f"Here's the conversation between the User and the Agent: \n\n{history}\n\n"
258
- user_prompt += f"Here's the latest message from the User: \n\n{message}\n\n"
259
- user_prompt += f"Here's the latest response from the Agent: \n\n{reply}\n\n"
260
- user_prompt += f"Please evaluate the response, replying with whether it is acceptable."
261
- return user_prompt
262
-
263
-
264
- # In[57]:
265
-
266
-
267
- def evaluate(reply, message, history) -> Evaluation:
268
-
269
- messages = [{"role": "system", "content": evaluator_system_prompt}] + [{"role": "user", "content": evaluator_user_prompt(reply, message, history)}]
270
- response = gemini.beta.chat.completions.parse(model="gemini-2.0-flash", messages=messages, response_format=Evaluation)
271
- return response.choices[0].message.parsed
272
-
273
-
274
- # In[58]:
275
-
276
-
277
def push_evaluation(question, answer, evaluation):
    """Send a Pushover notification summarising one evaluated Q/A exchange.

    Fix: the original built the verdict with an f-string that reused the
    outer single-quote delimiter inside the replacement field
    (f'Evaluation: {'is acceptable' ...}'), which is a SyntaxError on
    Python < 3.12 — nested quote reuse only arrived with PEP 701.
    """
    verdict = "is acceptable" if evaluation.is_acceptable else "not acceptable"
    message_text = (
        f"New Evaluation:\n"
        f"Question: {question}\n"
        f"Agent answer: {answer}\n"
        f"Evaluation: {verdict}"
    )
    payload = {
        "user": pushover_user,
        "token": pushover_token,
        "message": message_text,
    }
    requests.post(pushover_url, data=payload)
291
-
292
-
293
- # In[59]:
294
-
295
-
296
- def chat(message, history):
297
-
298
- messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
299
- done = False
300
- while not done:
301
-
302
-
303
- response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
304
-
305
- finish_reason = response.choices[0].finish_reason
306
-
307
-
308
- # Tool Calls
309
- if finish_reason=="tool_calls":
310
- msg = response.choices[0].message
311
- tool_calls = msg.tool_calls
312
- results = handle_tool_calls(tool_calls)
313
- messages.append(msg)
314
- messages.extend(results)
315
-
316
- response_final = openai.chat.completions.create(model="gpt-4o-mini", messages=messages)
317
- agent_reply = response_final.choices[0].message.content
318
-
319
- # Evaluation
320
- evaluation = evaluate(agent_reply, message, history)
321
- push_evaluation(message, agent_reply, evaluation)
322
-
323
- return agent_reply
324
-
325
- else:
326
- done = True
327
- return response.choices[0].message.content
328
-
329
-
330
- # In[ ]:
331
-
332
-
333
- demo = gr.ChatInterface(chat, type="messages")
334
-
335
-
336
- # In[ ]:
337
-
338
-
339
- if __name__ == '__main__':
340
- demo.launch()
341
-
342
-
343
- # In[ ]:
344
-
345
-
346
-
347
-
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Career Conversation Project
5
+
6
+ # In[41]:
7
+
8
+
9
+ from dotenv import load_dotenv
10
+ from openai import OpenAI
11
+ import json
12
+ import os
13
+ import requests
14
+ from pypdf import PdfReader
15
+ import gradio as gr
16
+
17
+
18
+ # In[42]:
19
+
20
+
21
# Load environment variables (.env overrides any already-set shell values).
load_dotenv(override=True)

# Default client — picks up OPENAI_API_KEY from the environment.
openai = OpenAI()

# Second client pointed at Google's OpenAI-compatible Gemini endpoint,
# used for response evaluation.
gemini = OpenAI(
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    api_key=os.getenv('GOOGLE_API_KEY'),
)
27
+
28
+
29
+ # In[43]:
30
+
31
+
32
# Pushover credentials and REST endpoint for sending push notifications.
pushover_url = "https://api.pushover.net/1/messages.json"
pushover_user = os.getenv("PUSHOVER_USER")
pushover_token = os.getenv("PUSHOVER_TOKEN")
35
+
36
+
37
+ # In[44]:
38
+
39
+
40
# Build the grounding context: LinkedIn PDF text, a summary file, and the persona name.
reader = PdfReader("../me/linkedin.pdf")
# extract_text() may return None/"" for a page; `or ""` skips those, as the
# original's truthiness check did.
linkedin = "".join(page.extract_text() or "" for page in reader.pages)

with open("../me/summary.txt", "r", encoding="utf-8") as f:
    summary = f.read()

name = "Adriana Salcedo"
51
+
52
+
53
+ # In[45]:
54
+
55
+
56
def push(message):
    """Send *message* as a Pushover notification (echoed to stdout for logging)."""
    print(f"Push: {message}")
    requests.post(
        pushover_url,
        data={"user": pushover_user, "token": pushover_token, "message": message},
    )
60
+
61
+
62
+ # ## Tools
63
+
64
+ # In[46]:
65
+
66
+
67
def record_user_details(email, name="Name not provided", notes="not provided"):
    """Tool: notify the owner that a visitor left an email (plus optional name/notes)."""
    note = f"Recording interest from {name} with email {email} and notes {notes}"
    push(note)
    return {"recorded": "ok"}
70
+
71
+
72
def record_unknown_question(question):
    """Tool: notify the owner of a question the agent could not answer."""
    note = f"Recording {question} asked that I couldn't answer"
    push(note)
    return {"recorded": "ok"}
75
+
76
def record_personal_question(question, acceptable):
    """Tool: log a personal question and whether it was deemed answerable."""
    status = 'answered' if acceptable else 'not answered'
    push(f'A personal question was asked and {status}:\n {question}')
    return {"recorded": "ok"}
82
+
83
+
84
def record_skill_question(question):
    """Tool: notify the owner that a skill/technology question was asked."""
    note = f'A skill-related question was asked:\n {question}'
    push(note)
    return {'recorded': 'ok'}
87
+
88
+
89
+ # In[47]:
90
+
91
+
92
# OpenAI function-calling schema for record_user_details.
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user",
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it",
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context",
            },
        },
        "required": ["email"],
        "additionalProperties": False,
    },
}
116
+
117
+
118
+ # In[48]:
119
+
120
+
121
# OpenAI function-calling schema for record_unknown_question.
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
136
+
137
+
138
+ # In[49]:
139
+
140
+
141
# OpenAI function-calling schema for record_personal_question.
# Fix: the "question" description claimed the question "will not be answered",
# but the tool (see record_personal_question) logs both answered and
# unanswered personal questions, distinguished by the `acceptable` flag.
# Also switched to double quotes for consistency with the sibling schemas.
record_personal_question_json = {
    "name": "record_personal_question",
    "description": "Use this tool to log if a personal question was asked. Indicate if the question is acceptable (can be answered) or not.",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The personal question that was asked",
            },
            "acceptable": {
                "type": "boolean",
                "description": "Indicates if a question is acceptable or not",
            },
        },
        "required": ["question", "acceptable"],
        "additionalProperties": False,
    },
}
160
+
161
+
162
+ # In[50]:
163
+
164
+
165
# OpenAI function-calling schema for record_skill_question.
# Fix: the implicit string concatenation in the description was missing a
# separator, producing "...experienceregardless of whether..." in the text
# the LLM reads. Added the missing ", " (and a comma before "ALWAYS" so the
# sentence parses), and tidied the property description.
record_skill_question_json = {
    "name": "record_skill_question",
    "description": (
        "Whenever a user asks about any skill, technology, tool, programming language, or experience, "
        "regardless of whether it is present in the profile or not, ALWAYS use this tool to notify the owner. "
        "Pass the original user question as the argument. "
        "Examples: 'Do you know Python?', 'Have you worked with Tableau?', 'Are you familiar with cloud computing?'"
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The skill-related question that was asked",
            },
        },
        "required": ["question"],
        "additionalProperties": False,
    },
}
185
+
186
+
187
+ # In[51]:
188
+
189
+
190
# Wrap each schema in the {"type": "function", ...} envelope the
# chat.completions API expects.
tools = [
    {"type": "function", "function": spec}
    for spec in (
        record_user_details_json,
        record_unknown_question_json,
        record_personal_question_json,
        record_skill_question_json,
    )
]
196
+
197
+
198
+ # In[52]:
199
+
200
+
201
def handle_tool_calls(tool_calls):
    """Dispatch each model-requested tool call to the matching global function.

    Returns one {"role": "tool", ...} message per call, ready to append to the
    conversation. An unknown tool name yields an empty result rather than raising.
    """
    replies = []
    for call in tool_calls:
        fn_name = call.function.name
        kwargs = json.loads(call.function.arguments)
        print(f"Tool called: {fn_name}", flush=True)
        fn = globals().get(fn_name)
        outcome = fn(**kwargs) if fn else {}
        replies.append(
            {"role": "tool", "content": json.dumps(outcome), "tool_call_id": call.id}
        )
    return replies
211
+
212
+
213
+ # In[53]:
214
+
215
+
216
# Persona prompt for the website agent: who to impersonate, how to behave,
# and when to invoke the recording tools. Built as one parenthesized
# expression instead of backslash continuations plus += — same final text.
system_prompt = (
    f"You are acting as {name}. You are answering questions on {name}'s website, "
    f"particularly questions related to {name}'s career, background, skills and experience. "
    f"Your responsibility is to represent {name} for interactions on the website as faithfully as possible. "
    f"You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. "
    "Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
    "If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. "
    "If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "
    f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
    f"With this context, please chat with the user, always staying in character as {name}."
)
226
+
227
+
228
+ # ## Implement Evaluator
229
+
230
+ # In[54]:
231
+
232
+
233
from pydantic import BaseModel

class Evaluation(BaseModel):
    """Structured verdict for one agent reply, parsed from the Gemini evaluator call in evaluate()."""
    # True when the reply is judged acceptable; drives push_evaluation's verdict text.
    is_acceptable: bool
    # Free-text feedback from the evaluator model.
    feedback: str
238
+
239
+
240
+ # In[55]:
241
+
242
+
243
# System prompt for the evaluator model: its role, the agent's instructions,
# and the same grounding context the agent received. Built as one
# parenthesized expression — same final text as the continuation/+= original.
evaluator_system_prompt = (
    "You are an evaluator that decides whether a response to a question is acceptable. "
    "You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. "
    f"The Agent is playing the role of {name} and is representing {name} on their website. "
    "The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. "
    f"The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:"
    f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
    "With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback if necessary."
)
251
+
252
+
253
+ # In[56]:
254
+
255
+
256
def evaluator_user_prompt(reply, message, history):
    """Assemble the evaluator's user message from the conversation pieces."""
    return (
        f"Here's the conversation between the User and the Agent: \n\n{history}\n\n"
        f"Here's the latest message from the User: \n\n{message}\n\n"
        f"Here's the latest response from the Agent: \n\n{reply}\n\n"
        "Please evaluate the response, replying with whether it is acceptable."
    )
262
+
263
+
264
+ # In[57]:
265
+
266
+
267
def evaluate(reply, message, history) -> Evaluation:
    """Ask the Gemini evaluator to grade the agent's latest reply.

    Returns the response parsed into an Evaluation via structured output.
    """
    convo = [
        {"role": "system", "content": evaluator_system_prompt},
        {"role": "user", "content": evaluator_user_prompt(reply, message, history)},
    ]
    result = gemini.beta.chat.completions.parse(
        model="gemini-2.0-flash",
        messages=convo,
        response_format=Evaluation,
    )
    return result.choices[0].message.parsed
272
+
273
+
274
+ # In[58]:
275
+
276
+
277
def push_evaluation(question, answer, evaluation):
    """Send a Pushover notification summarising one evaluated Q/A exchange."""
    verdict = 'is acceptable' if evaluation.is_acceptable else 'not acceptable'
    message_text = (
        f'New Evaluation:\n'
        f'Question: {question}\n'
        f'Agent answer: {answer}\n'
        f'Evaluation: {verdict}'
    )
    requests.post(
        pushover_url,
        data={"user": pushover_user, "token": pushover_token, "message": message_text},
    )
291
+
292
+
293
+ # In[59]:
294
+
295
+
296
def chat(message, history):
    """Gradio chat handler: answer as the persona, running tools as requested.

    Fixes over the original:
    - The original's follow-up completion after a tool round omitted
      `tools=tools`, so the model could never request a second round of tool
      calls, and the `while not done` loop never actually iterated. Now the
      loop keeps calling with tools until finish_reason != "tool_calls".
    - Behaviour is otherwise preserved: evaluation + push notification still
      happen only for tool-assisted replies, as in the original.
    """
    messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
    used_tools = False

    while True:
        response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
        choice = response.choices[0]
        if choice.finish_reason != "tool_calls":
            break
        # Execute the requested tools and feed their results back to the model.
        used_tools = True
        messages.append(choice.message)
        messages.extend(handle_tool_calls(choice.message.tool_calls))

    agent_reply = choice.message.content

    if used_tools:
        # Grade tool-assisted replies and notify the owner of the verdict.
        evaluation = evaluate(agent_reply, message, history)
        push_evaluation(message, agent_reply, evaluation)

    return agent_reply
328
+
329
+
330
+ # In[ ]:
331
+
332
+
333
# Gradio chat UI wired to chat(); type="messages" supplies history as
# role/content dicts, matching how chat() concatenates them into the prompt.
demo = gr.ChatInterface(chat, type="messages")
334
+
335
+
336
+ # In[ ]:
337
+
338
+
339
# Start the Gradio server only when executed as a script (not on import).
if __name__ == '__main__':
    demo.launch()
341
+
342
+
343
+ # In[ ]:
344
+
345
+
346
+
347
+