frimelle HF Staff committed on
Commit
b6dc8c1
·
1 Parent(s): aa3ff8c

little reset

Browse files
Files changed (1) hide show
  1. app.py +32 -29
app.py CHANGED
@@ -3,46 +3,51 @@ from huggingface_hub import InferenceClient
3
  from datetime import datetime
4
  import os
5
  import uuid
6
- from huggingface_hub import HfApi
7
-
8
- api = HfApi()
9
 
10
  # ---- System Prompt ----
11
  with open("system_prompt.txt", "r") as f:
12
  SYSTEM_PROMPT = f.read()
13
 
14
- # ---- Constants ----
15
  MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
 
16
  DATASET_REPO = "frimelle/companion-chat-logs"
17
  HF_TOKEN = os.environ.get("HF_TOKEN") # set in Space secrets
 
18
  client = InferenceClient(MODEL_NAME)
19
 
20
- # ---- Upload to Dataset ----
21
- def upload_chat_to_dataset(user_message, assistant_message, system_prompt):
22
- row = {
23
- "timestamp": datetime.now().isoformat(),
24
- "session_id": str(uuid.uuid4()),
25
- "user": user_message,
26
- "assistant": assistant_message,
27
- "system_prompt": system_prompt,
28
- }
29
 
30
- dataset = Dataset.from_dict({k: [v] for k, v in row.items()})
31
- dataset.push_to_hub(DATASET_REPO, private=True, token=HF_TOKEN)
 
 
 
 
32
 
33
- # ---- Chat Function ----
34
- def respond(message, history, system_message, max_tokens, temperature, top_p):
 
 
 
 
 
 
 
35
  messages = [{"role": "system", "content": system_message}]
36
 
37
- for user_msg, bot_msg in history:
38
- if user_msg:
39
- messages.append({"role": "user", "content": user_msg})
40
- if bot_msg:
41
- messages.append({"role": "assistant", "content": bot_msg})
42
 
43
  messages.append({"role": "user", "content": message})
44
 
45
  response = ""
 
46
  for chunk in client.chat_completion(
47
  messages,
48
  max_tokens=max_tokens,
@@ -55,22 +60,20 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
55
  response += token
56
  yield response
57
 
58
- # Log the final full message to the dataset
59
- upload_chat_to_dataset(message, response, system_message)
60
 
61
- # ---- Gradio UI ----
62
  demo = gr.ChatInterface(
63
- fn=respond,
64
  #additional_inputs=[
65
  # gr.Textbox(value=SYSTEM_PROMPT, label="System message"),
66
  # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
67
  # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
68
  # gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
69
  #],
70
- title="BoundrAI",
71
  )
72
 
73
- print(api.whoami(token=HF_TOKEN))
74
-
75
  if __name__ == "__main__":
76
  demo.launch()
 
3
  from datetime import datetime
4
  import os
5
  import uuid
 
 
 
6
 
7
  # ---- System Prompt ----
8
  with open("system_prompt.txt", "r") as f:
9
  SYSTEM_PROMPT = f.read()
10
 
 
11
  MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
12
+
13
  DATASET_REPO = "frimelle/companion-chat-logs"
14
  HF_TOKEN = os.environ.get("HF_TOKEN") # set in Space secrets
15
+
16
  client = InferenceClient(MODEL_NAME)
17
 
18
+ # ---- Setup logging ----
19
+ LOG_DIR = "chat_logs"
20
+ os.makedirs(LOG_DIR, exist_ok=True)
21
+ session_id = str(uuid.uuid4())
 
 
 
 
 
22
 
23
+ def log_chat(session_id, user_msg, bot_msg):
24
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
25
+ log_path = os.path.join(LOG_DIR, f"{session_id}.txt")
26
+ with open(log_path, "a", encoding="utf-8") as f:
27
+ f.write(f"[{timestamp}] User: {user_msg}\n")
28
+ f.write(f"[{timestamp}] Bot: {bot_msg}\n\n")
29
 
30
+ # ---- Respond Function with Logging ----
31
+ def respond(
32
+ message,
33
+ history: list[tuple[str, str]],
34
+ system_message,
35
+ max_tokens,
36
+ temperature,
37
+ top_p,
38
+ ):
39
  messages = [{"role": "system", "content": system_message}]
40
 
41
+ for val in history:
42
+ if val[0]:
43
+ messages.append({"role": "user", "content": val[0]})
44
+ if val[1]:
45
+ messages.append({"role": "assistant", "content": val[1]})
46
 
47
  messages.append({"role": "user", "content": message})
48
 
49
  response = ""
50
+
51
  for chunk in client.chat_completion(
52
  messages,
53
  max_tokens=max_tokens,
 
60
  response += token
61
  yield response
62
 
63
+ # Save full message after stream ends
64
+ log_chat(session_id, message, response)
65
 
66
+ # ---- Gradio Interface ----
67
  demo = gr.ChatInterface(
68
+ respond,
69
  #additional_inputs=[
70
  # gr.Textbox(value=SYSTEM_PROMPT, label="System message"),
71
  # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
72
  # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
73
  # gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
74
  #],
75
+ title="BoundrAI"
76
  )
77
 
 
 
78
  if __name__ == "__main__":
79
  demo.launch()