Update functions.py

functions.py  CHANGED  (+16 -14)
@@ -47,17 +47,19 @@ def select_document_section_by_query_similarity(query: str, contexts: dict[(str,
 
     return document_similarities[0]
 
-def construct_prompt(
+def construct_prompt(query: str, context_embeddings: dict, df: pd.DataFrame) -> str:
     """
-
+    Construct the prompt for the ChatCompletion API
     """
-    _ , chosen_service = select_document_section_by_query_similarity(
+    _ , chosen_service = select_document_section_by_query_similarity(query, context_embeddings)
 
     service_description = df.loc[chosen_service].description.replace("\n", " ")
-
-
+    introduction = "Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "
+    introduction += "I could not find an answer to your question, please reach out to Helpdesk."
+    question = f"\n\nQ: {query}"
+    message = introduction + "\n* " + "\n\nContext:\n" + service_description + question
     link = df.loc[chosen_service].link
-    return
+    return message, link
 
 def answer_query_with_context(
     query: str,
@@ -65,24 +67,24 @@ def answer_query_with_context(
     document_embeddings: dict[(str, str), np.array],
     show_prompt: bool = False
 ) -> str:
+
     prompt, link = construct_prompt(
         query,
         document_embeddings,
         df
     )
 
-    if show_prompt:
-        print(prompt)
-
     response = openai.ChatCompletion.create(
-
-
-
+        model = COMPLETIONS_MODEL, # "gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": prompt}
+        ]
+    )
 
     end_message = "\n\nPlease check out the relevant HMC service catalogue for more details: "+ link
     end_message += """\n\nIf not satisfied with the answer, please email [email protected], call 909.607.7777 or visit the Helpdesk located on the Sprague first floor. """
     end_message += """Helpdesk representatives are also available for a remote chat session during normal hours on Monday - Friday, 8:00 AM - 5:00 PM PST via https://helpdesk.hmc.edu"""
-
-    reply = response["choices"][0]["text"] + end_message
+    reply = response["choices"][0]["message"]["content"] + end_message
 
     return reply
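
For reference, below is a minimal standalone sketch of the ChatCompletion pattern this commit switches to, written against the pre-1.0 openai Python SDK that the code above uses. The API key handling, model name, and the example context and question are placeholders rather than values from this Space; only the message layout and the ["message"]["content"] access mirror the committed lines.

# Standalone sketch (not part of the commit). Assumes openai<1.0, where
# openai.ChatCompletion.create() is available; the key, model, context and
# question below are placeholders.
import openai

openai.api_key = "sk-..."  # the Space presumably configures this elsewhere

prompt = (
    "Answer the question as truthfully as possible using the provided context, "
    "and if the answer is not contained within the text below, say "
    "I could not find an answer to your question, please reach out to Helpdesk."
    "\n\nContext:\nThe campus VPN is available to all students, faculty, and staff."  # placeholder context
    "\n\nQ: How do I connect to the campus VPN?"  # placeholder question
)

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # the diff passes COMPLETIONS_MODEL here
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ],
)

# Chat responses carry the text under ["message"]["content"] rather than
# ["text"], which is the other half of this commit's change.
print(response["choices"][0]["message"]["content"])

In the updated answer_query_with_context, the user message is the prompt assembled by construct_prompt from the best-matching service description, and the Helpdesk footer plus catalogue link are appended to the model's content before the reply is returned.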