TraumaBackend / trauma /api /message /ai /openai_request.py
brestok's picture
finish ai
3ec35ef
raw
history blame
2.35 kB
import json
from trauma.api.chat.dto import EntityData
from trauma.api.message.ai.prompts import TraumaPrompts
from trauma.core.config import settings
from trauma.core.wrappers import openai_wrapper
@openai_wrapper(is_json=True)
async def update_entity_data_with_ai(entity_data: EntityData, user_message: str, assistant_message: str):
    """Build the message payload asking the model to update entity data.

    Fills the ``update_entity_data_with_ai`` prompt template with the current
    entity state (as pretty-printed JSON) plus the latest assistant/user turn,
    and returns it as a single system message.  The ``openai_wrapper``
    decorator performs the actual API call and parses the JSON response.
    """
    system_prompt = (
        TraumaPrompts.update_entity_data_with_ai
        .replace("{entity_data}", entity_data.model_dump_json(indent=2))
        .replace("{assistant_message}", assistant_message)
        .replace("{user_message}", user_message)
    )
    return [{"role": "system", "content": system_prompt}]
@openai_wrapper(temperature=0.8)
async def generate_next_question(empty_field: str, instructions: str, user_message: str, message_history: list[dict]):
    """Build the payload that asks the model for the next intake question.

    The system prompt is the ``generate_next_question`` template with the
    still-missing field name and its filling instructions substituted in.
    Prior conversation turns are replayed between the system prompt and the
    newest user message; the decorator handles the OpenAI call.
    """
    system_prompt = (
        TraumaPrompts.generate_next_question
        .replace("{empty_field}", empty_field)
        .replace("{instructions}", instructions)
    )
    return [
        {"role": "system", "content": system_prompt},
        *message_history,
        {"role": "user", "content": user_message},
    ]
@openai_wrapper(temperature=0.4)
async def generate_search_request(user_messages_str: str, entity_data: dict):
    """Build the payload that asks the model to produce a search request.

    Substitutes the collected entity data (serialized to indented JSON) and
    the concatenated user messages into the ``generate_search_request``
    template, returning it as one system message for the decorator to send.
    """
    system_prompt = (
        TraumaPrompts.generate_search_request
        .replace("{entity_data}", json.dumps(entity_data, indent=2))
        .replace("{user_messages_str}", user_messages_str)
    )
    return [{"role": "system", "content": system_prompt}]
@openai_wrapper(temperature=0.4)
async def generate_final_response(final_entities: str, user_message: str, message_history: list[dict]):
    """Build the payload for the model's final recommendation answer.

    NOTE(review): the system prompt used here is
    ``generate_recommendation_decision`` rather than one matching this
    function's name — presumably intentional, but worth confirming against
    ``TraumaPrompts``.  Matched entities are injected into the template, the
    conversation history is replayed, and the latest user message closes the
    payload; the decorator performs the API call.
    """
    system_prompt = (
        TraumaPrompts.generate_recommendation_decision
        .replace("{final_entities}", final_entities)
    )
    return [
        {"role": "system", "content": system_prompt},
        *message_history,
        {"role": "user", "content": user_message},
    ]
async def convert_value_to_embeddings(value: str) -> list[float]:
    """Return a 1536-dimensional embedding vector for *value*.

    Calls the shared OpenAI client's embeddings endpoint with the
    ``text-embedding-3-large`` model and extracts the single result's vector.
    """
    response = await settings.OPENAI_CLIENT.embeddings.create(
        model='text-embedding-3-large',
        dimensions=1536,
        input=value,
    )
    return response.data[0].embedding