Spaces:
Paused
Paused
File size: 2,810 Bytes
22d76f2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
from typing import Dict, List, Optional

from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoModelForCausalLM,
    pipeline,
)
class AdviceGenerator:
    """Generate personalized, supportive (non-clinical) advice via a chat LLM.

    Wraps any client exposing an OpenAI-style
    ``create_chat_completion(messages=..., ...)`` method (e.g. llama-cpp-python)
    and pins the assistant's behavior with a structured system prompt:
    empathy sentence -> numbered tips -> optional script -> micro-step ->
    follow-up question.
    """

    def __init__(self, llm):
        """
        Args:
            llm: Chat-completion client. Must provide
                ``create_chat_completion(messages, temperature, top_p,
                max_tokens, stream)`` returning an OpenAI-style response dict.
        """
        self.llm = llm
        # System message prepended to every conversation; forces concrete,
        # situation-grounded output instead of generic platitudes.
        self.role = {
            "role": "system",
            "content": (
                "You are a supportive assistant (not a mental health professional). "
                "Be concrete and tailor every response to the user's situation. "
                "Requirements:\n"
                "1) Begin with ONE empathetic sentence that mentions a key detail from the user's text (name, event, constraint).\n"
                "2) Then give 3–5 numbered, practical tips. Each tip must reference the user's situation (use names/keywords when present).\n"
                "3) If the user's text involves talking to someone (crush, friend, teacher, parent, boss), include a short **Script** block "
                "   with two options (in-person and text), customized with any names from the user's text.\n"
                "4) Add a **Try now (2 min)** micro-step.\n"
                "5) End with ONE targeted follow-up question that references the user's situation.\n"
                "Avoid platitudes and generic advice; avoid clinical instructions."
            ),
        }

    def generate_advice(
        self,
        disorder: str,
        user_text: str,
        history: Optional[List[Dict[str, str]]] = None,
        max_history_msgs: int = 50,
        max_tokens: int = 600,  # headroom for the multi-part structured reply
        temperature: float = 0.6,
        top_p: float = 0.9,
    ) -> Dict[str, str]:
        """Produce one advice response for ``user_text``.

        Args:
            disorder: Detected context label, injected into the prompt verbatim.
            user_text: The user's raw message; the prompt instructs the model
                to ground every tip in it.
            history: Optional prior chat messages (role/content dicts) to
                preserve conversational continuity.
            max_history_msgs: Only the most recent this-many history messages
                are forwarded, bounding prompt size.
            max_tokens: Generation cap passed to the LLM.
            temperature: Sampling temperature passed to the LLM.
            top_p: Nucleus-sampling cutoff passed to the LLM.

        Returns:
            ``{"text": <model reply>}``. On any LLM/parsing failure a gentle
            fallback message (echoing ``user_text``) is returned instead of
            raising, so the chat loop never crashes.
        """
        msgs = [self.role]
        # Preserve rolling chat history if available, bounded for prompt size.
        if history:
            msgs.extend(history[-max_history_msgs:])
        # Always append the new user input with explicit grounding instructions.
        msgs.append({
            "role": "user",
            "content": (
                "Use the exact situation below to personalize your advice. "
                "Extract the main goal or barrier from the text and ground each tip in it.\n\n"
                f"Detected context: {disorder}\n"
                f"User text: {user_text}\n\n"
                "Follow the system instructions strictly. Do NOT ask vague questions first."
            ),
        })
        try:
            resp = self.llm.create_chat_completion(
                messages=msgs,
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
                stream=False,
            )
            text = resp["choices"][0]["message"]["content"].strip()
            return {"text": text}
        except Exception:
            # Deliberate best-effort fallback: any backend or response-shape
            # error degrades to an empathetic re-prompt rather than a crash.
            return {"text": f"I'm here to listen. Could you tell me more about how \"{user_text}\" is affecting you?"}