Asilbek14 committed on
Commit 8dba69a · verified · 1 Parent(s): 385b181

Update app.py

Files changed (1)
  1. app.py +12 -13
app.py CHANGED
@@ -30,29 +30,26 @@ generator = pipeline(
 
 # ---------------- HELPERS ----------------
 def format_prompt(chat_history, user_message, system_message, response_style):
+    # Start with system message
     prompt = system_message + "\n\n"
 
-    # Include past conversation without "User/Assistant" prefixes
+    # Append previous conversation content only
     for turn in chat_history:
-        if turn["role"] == "user":
-            prompt += f"Question: {turn['content']}\n"
-        else:
-            prompt += f"Answer: {turn['content']}\n"
+        prompt += f"{turn['content']}\n"
 
-    # Add the current message
-    prompt += f"Question: {user_message}\nAnswer:"
+    # Append the new user message
+    prompt += user_message
 
-    # Style control
+    # Optional: add response style instruction
     if response_style == "No explanation":
-        prompt += " Give only the direct answer, no explanation."
+        prompt += " Provide only the answer."
     elif response_style == "Short explanation":
-        prompt += " Give a short one-sentence explanation."
+        prompt += " Provide a short one-sentence explanation."
     elif response_style == "Detailed explanation":
-        prompt += " Give a detailed answer with reasoning and examples."
+        prompt += " Provide a detailed explanation."
 
     return prompt
 
-
 # ---------------- CHAT FUNCTION ----------------
 def chat(user_message, chat_history, system_message, max_tokens, temperature, top_p, response_style):
     chat_history = chat_history or []
@@ -66,8 +63,10 @@ def chat(user_message, chat_history, system_message, max_tokens, temperature, top_p, response_style):
         top_p=top_p,
     )[0]['generated_text']
 
-    response = output.strip()
+    # Sometimes Flan-T5 outputs the question in the result, so strip it
+    response = output.replace(prompt, "").strip()
 
+    # Save user and assistant content without labels
     chat_history.append({"role": "user", "content": user_message})
     chat_history.append({"role": "assistant", "content": response})
 
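
For context, here is a minimal, self-contained sketch of the updated `format_prompt` exactly as it appears in the diff, with a made-up chat history and system message (illustrative values only, not taken from the Space) to show the prompt string the new version produces: plain concatenated turn contents with no Question/Answer labels.

```python
def format_prompt(chat_history, user_message, system_message, response_style):
    # Start with system message
    prompt = system_message + "\n\n"

    # Append previous conversation content only (no role labels)
    for turn in chat_history:
        prompt += f"{turn['content']}\n"

    # Append the new user message
    prompt += user_message

    # Optional: add response style instruction
    if response_style == "No explanation":
        prompt += " Provide only the answer."
    elif response_style == "Short explanation":
        prompt += " Provide a short one-sentence explanation."
    elif response_style == "Detailed explanation":
        prompt += " Provide a detailed explanation."

    return prompt


# Illustrative values, not from the Space
history = [
    {"role": "user", "content": "What is the capital of France?"},
    {"role": "assistant", "content": "Paris."},
]
print(format_prompt(history, "And of Germany?",
                    "You are a helpful assistant.", "No explanation"))
# You are a helpful assistant.
#
# What is the capital of France?
# Paris.
# And of Germany? Provide only the answer.
```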
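One note on the new `response = output.replace(prompt, "").strip()` line: Flan-T5 runs under the `text2text-generation` pipeline, which normally returns only the model's continuation rather than echoing the input, so the replace acts as a cheap safety net for the echo cases the commit comment mentions. Below is a hedged sketch of how the call fits together; the checkpoint name and sampling arguments are stand-ins, since the Space's actual `generator = pipeline(...)` setup sits above the first hunk and is not part of this diff.

```python
from transformers import pipeline

# Stand-in checkpoint for illustration only
generator = pipeline("text2text-generation", model="google/flan-t5-base")

prompt = format_prompt([], "What is the capital of France?",
                       "You are a helpful assistant.", "No explanation")
output = generator(prompt, max_new_tokens=64, do_sample=True,
                   temperature=0.7, top_p=0.9)[0]["generated_text"]

# Same guard as the commit: drop any echoed prompt text, then trim whitespace.
# For a seq2seq model this is usually a no-op, since the pipeline returns only
# the generated continuation.
response = output.replace(prompt, "").strip()
```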