mriusero committed
Commit 9ec3492
1 Parent(s): 0d159dc
app.py CHANGED
@@ -15,6 +15,8 @@ STATE = {
     "running": False,
     "date": datetime.now(),
     "part_id": 0,
+    "cycle": 0,
+    "chat": [],
     "status": {},
     "data": {},
 }
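The two new fields back the multi-turn behaviour added in src/agent/stream.py below: `cycle` counts completed `respond()` calls and `chat` stores the accumulated message list. A minimal sketch of that intended flow (simplified and synchronous; the real `respond()` is an async generator that also streams the chat history):

```python
# Sketch only: field names come from STATE above; the agent call is elided.
state = {"cycle": 0, "chat": []}

def respond_sketch(message, state):
    if state["cycle"] == 0:
        # first turn: start a fresh conversation
        messages = [{"role": "user", "content": message}]
    else:
        # later turns: resume from the messages stored by the previous cycle
        messages = state["chat"] + [{"role": "user", "content": message}]
    # ... agent call and phase streaming elided ...
    state["cycle"] += 1
    state["chat"] = messages
    return messages

respond_sketch("What is the production status?", state)
respond_sketch("Any downtimes since then?", state)  # reuses state["chat"]
```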
data/downtimes.json CHANGED
@@ -1,10 +1 @@
-[
-    {
-        "Timestamp":"2025-06-11 02:40:38",
-        "Event":"Machine Error",
-        "Error Code":"E001",
-        "Error Description":"Calibration Error",
-        "Downtime Start":"2025-06-11 02:40:38",
-        "Downtime End":"2025-06-11 02:55:38"
-    }
-]
+"[]"
src/agent/stream.py CHANGED
@@ -4,7 +4,6 @@ import asyncio
4
  import re
5
 
6
  from src.agent.mistral_agent import MistralAgent
7
-
8
  from src.agent.utils.call import call_tool
9
 
10
 
@@ -28,34 +27,44 @@ def extract_phases(text):
28
 
29
  return phases
30
 
31
- async def respond(message, history=None):
 
 
32
  if history is None:
33
  history = []
34
 
35
- if not history or history[-1].role != "assistant" or history[-1].metadata.get("status") == "done":
36
- history.append(ChatMessage(role="assistant", content="", metadata={"title": "Thinking...", "status": "pending"}))
37
- yield history
38
-
39
- messages = [
40
- {"role": "system", "content": SYSTEM_PROMPT},
41
- {"role": "user", "content": message},
42
- {"role": "assistant", "content": "THINK: Let's start thinking, ", "prefix": True},
43
- ]
 
 
 
 
 
 
 
44
 
45
  phase_order = ["think", "act", "observe", "final"]
46
- current_phase_index = 0
47
  done = False
48
-
49
  final_full = ""
 
50
  while not done:
51
- current_phase = phase_order[current_phase_index]
52
  if current_phase != "final":
53
  full = ""
54
  else:
55
  full = final_full
56
 
57
  print('\n', '---' * 15)
58
- print(f">>> messages before payload [phase {current_phase_index}] :", json.dumps([m for m in messages if m.get("role") != "system"], indent=2))
 
59
  payload = {
60
  "agent_id": agent.agent_id,
61
  "messages": messages,
@@ -74,6 +83,7 @@ async def respond(message, history=None):
74
  async for chunk in response:
75
  delta = chunk.data.choices[0].delta
76
  content = delta.content or ""
 
77
  full += content
78
  if current_phase == "final":
79
  final_full = full
@@ -81,11 +91,11 @@ async def respond(message, history=None):
81
  phases = extract_phases(full)
82
  buffer = phases.get(current_phase, "")
83
  if current_phase == "think":
84
- history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Thinking...", "status": "pending"})
85
  elif current_phase == "act":
86
- history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Acting...", "status": "pending"})
87
  elif current_phase == "observe":
88
- history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Observing...", "status": "pending"})
89
  yield history
90
 
91
  if current_phase == "final":
@@ -98,7 +108,7 @@ async def respond(message, history=None):
98
  done = True
99
  break
100
 
101
- if current_phase_index == 0:
102
  messages = [msg for msg in messages if not msg.get("prefix")]
103
  if buffer:
104
  prefix_label = current_phase.upper() if current_phase != "final" else "FINAL ANSWER"
@@ -108,12 +118,12 @@ async def respond(message, history=None):
108
  "prefix": True
109
  })
110
 
111
- elif current_phase_index == 1:
112
  for message in messages:
113
  if "prefix" in message:
114
  del message["prefix"]
115
 
116
- if current_phase_index == 2:
117
  for message in messages:
118
  if "prefix" in message:
119
  del message["prefix"]
@@ -137,12 +147,12 @@ async def respond(message, history=None):
137
  last_tool_response = next((m for m in reversed(messages) if m["role"] == "tool"), None)
138
  if last_tool_response and last_tool_response.get("content"):
139
  buffer += "\n\n" + last_tool_response["content"]
140
- history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Acting...", "status": "pending"})
141
  yield history
142
 
143
  if not done:
144
- current_phase_index += 1
145
- if current_phase_index < len(phase_order):
146
  pass
147
  else:
148
  done = True
@@ -151,9 +161,20 @@ async def respond(message, history=None):
151
  final_text = phases.get("final", "")
152
 
153
  if observe_text:
154
- history[-1] = ChatMessage(role="assistant", content=observe_text, metadata={"title": "Observing...", "status": "done"})
155
-
 
 
 
 
156
  if final_text:
157
  history.append(ChatMessage(role="assistant", content=final_text))
158
 
 
 
 
 
 
 
 
159
  yield history
 
4
  import re
5
 
6
  from src.agent.mistral_agent import MistralAgent
 
7
  from src.agent.utils.call import call_tool
8
 
9
 
 
27
 
28
  return phases
29
 
30
+
31
+ async def respond(message, history=None, state=None):
32
+
33
  if history is None:
34
  history = []
35
 
36
+ if state["cycle"] == 0:
37
+ messages = [
38
+ {"role": "system", "content": SYSTEM_PROMPT},
39
+ {"role": "user", "content": message},
40
+ {"role": "assistant", "content": "THINK: Let's start thinking, ", "prefix": True},
41
+ ]
42
+ history.append(ChatMessage(role="assistant", content="", metadata={"title": "Thinking...", "status": "pending", 'id': state["cycle"]}))
43
+ yield history
44
+ else:
45
+ messages = state["chat"] + [
46
+ {"role": "user", "content": message},
47
+ {"role": "assistant", "content": "THINK: Let's start thinking, ", "prefix": True}
48
+ ]
49
+ history.append(ChatMessage(role="assistant", content=""))
50
+ history[-1] = (ChatMessage(role="assistant", content="", metadata={"title": "Thinking...", "status": "pending", 'id': state["cycle"]}))
51
+ yield history
52
 
53
  phase_order = ["think", "act", "observe", "final"]
54
+ phase_index = 0
55
  done = False
 
56
  final_full = ""
57
+
58
  while not done:
59
+ current_phase = phase_order[phase_index]
60
  if current_phase != "final":
61
  full = ""
62
  else:
63
  full = final_full
64
 
65
  print('\n', '---' * 15)
66
+ print(f">>> messages before payload [phase {phase_index}] :", json.dumps([m for m in messages if m.get("role") != "system"], indent=2))
67
+ #print(f">>> messages: {json.dumps(messages, indent=2)}")
68
  payload = {
69
  "agent_id": agent.agent_id,
70
  "messages": messages,
 
83
  async for chunk in response:
84
  delta = chunk.data.choices[0].delta
85
  content = delta.content or ""
86
+
87
  full += content
88
  if current_phase == "final":
89
  final_full = full
 
91
  phases = extract_phases(full)
92
  buffer = phases.get(current_phase, "")
93
  if current_phase == "think":
94
+ history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Thinking...", "status": "pending", "id": state['cycle'], 'parent_id': state["cycle"]})
95
  elif current_phase == "act":
96
+ history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Acting...", "status": "pending", "id": state['cycle']+1, 'parent_id': state["cycle"]})
97
  elif current_phase == "observe":
98
+ history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Observing...", "status": "pending", "id": state['cycle']+2, 'parent_id': state["cycle"]})
99
  yield history
100
 
101
  if current_phase == "final":
 
108
  done = True
109
  break
110
 
111
+ if phase_index == 0:
112
  messages = [msg for msg in messages if not msg.get("prefix")]
113
  if buffer:
114
  prefix_label = current_phase.upper() if current_phase != "final" else "FINAL ANSWER"
 
118
  "prefix": True
119
  })
120
 
121
+ elif phase_index == 1:
122
  for message in messages:
123
  if "prefix" in message:
124
  del message["prefix"]
125
 
126
+ if phase_index == 2:
127
  for message in messages:
128
  if "prefix" in message:
129
  del message["prefix"]
 
147
  last_tool_response = next((m for m in reversed(messages) if m["role"] == "tool"), None)
148
  if last_tool_response and last_tool_response.get("content"):
149
  buffer += "\n\n" + last_tool_response["content"]
150
+ history[-1] = ChatMessage(role="assistant", content=buffer, metadata={"title": "Acting...", "status": "pending", "id": state['cycle']+1, 'parent_id': state["cycle"]})
151
  yield history
152
 
153
  if not done:
154
+ phase_index += 1
155
+ if phase_index < len(phase_order):
156
  pass
157
  else:
158
  done = True
 
161
  final_text = phases.get("final", "")
162
 
163
  if observe_text:
164
+ history[-1] = ChatMessage(role="assistant", content=observe_text, metadata={"title": "Observing...", "status": "done", "id": state['cycle']+2, 'parent_id': state["cycle"]})
165
+ messages = [msg for msg in messages if not msg.get("prefix")]
166
+ messages.append({
167
+ "role": "assistant",
168
+ "content": observe_text,
169
+ })
170
  if final_text:
171
  history.append(ChatMessage(role="assistant", content=final_text))
172
 
173
+ last_message = messages[-1]
174
+ last_message["content"] += ' FINAL ANSWER: ' + final_text
175
+ messages[-1] = last_message
176
+
177
+ state["cycle"] += 1
178
+ state["chat"] = messages
179
+
180
  yield history
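The loop above steers the model through the THINK / ACT / OBSERVE / FINAL ANSWER phases with the assistant-message `prefix` flag: after each phase streams back, the old prefix entry is stripped and the captured buffer is re-sent as a new prefix so the model continues the same turn one phase further. A minimal sketch of the message shape (the exact prefix-message content built at each transition is not visible in this diff, so the second append below is a hypothetical illustration, not the repo's exact format):

```python
# Illustrative only: how the prefixed assistant message evolves between phases.
SYSTEM_PROMPT = "Answer using THINK, ACT, OBSERVE and FINAL ANSWER phases."  # placeholder

# Cycle start (state["cycle"] == 0): the model must continue after "THINK: ..."
messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {"role": "user", "content": "Is the machine currently down?"},
    {"role": "assistant", "content": "THINK: Let's start thinking, ", "prefix": True},
]

# After the THINK phase streams back, the old prefix entry is dropped and the
# captured buffer is re-sent as the next prefix, e.g.:
messages = [m for m in messages if not m.get("prefix")]
messages.append({
    "role": "assistant",
    "content": "THINK: the downtime log should be checked first. ACT: ",  # hypothetical buffer
    "prefix": True,
})
```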
src/agent/tools/check_downtines.py CHANGED
@@ -14,7 +14,7 @@ def get_downtimes() -> str:
 
     data = json.loads(json_string)
 
-    if data is None or len(data) == 0:
+    if data is None or len(data) == 0 or data == "[]":
         result = "No downtimes recorded yet. Please check the production status or wait for downtimes to occur."
     else:
         result = "## Downtimes:\n\n"
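The extra `data == "[]"` branch covers the new file format: data/downtimes.json now stores a JSON-encoded string (see the dashboard.py change below), so a single `json.loads` yields the string `'[]'` rather than an empty list. A quick sketch of why the old checks miss that case (assuming `json_string` is the raw file content):

```python
import json

json_string = '"[]"'            # raw file content: a JSON string, not an array
data = json.loads(json_string)  # -> the Python string '[]'

print(data is None)       # False
print(len(data) == 0)     # False (len('[]') == 2)
print(data == "[]")       # True -> the added guard catches the empty case
```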
src/production/metrics/machine.py CHANGED
@@ -65,5 +65,4 @@ async def fetch_issues(raw_data):
     selected_issues = issues[
         ["Timestamp", "Event", "Error Code", "Error Description", "Downtime Start", "Downtime End"]
     ]
-    selected_issues.to_json('data/downtimes.json', orient="records", indent=4)
     return selected_issues
src/ui/dashboard.py CHANGED
@@ -181,6 +181,9 @@ async def on_tick(state, displays):
     with open("data/status.json", "w") as f:
         json.dump(state["status"], f, indent=4)
 
+    with open("data/downtimes.json", "w") as f:
+        json.dump(issues_df.to_json(orient='records'), f, indent=4)
+
     return tool_plots + general_plots + [state]
 
 def dashboard_ui(state):
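Note that `DataFrame.to_json(orient='records')` already returns a JSON string, so wrapping it in `json.dump` encodes it a second time; that is what produces the quoted `"[]"` now seen in data/downtimes.json above. A small sketch of the resulting file content, using a hypothetical one-row frame in place of `issues_df`:

```python
import json
import pandas as pd

df = pd.DataFrame([{"Event": "Machine Error", "Error Code": "E001"}])  # stand-in for issues_df

payload = df.to_json(orient="records")          # already a str: '[{"Event":"Machine Error",...}]'
with open("downtimes_sketch.json", "w") as f:   # hypothetical path, to avoid touching real data
    json.dump(payload, f, indent=4)             # writes the string quoted and escaped

with open("downtimes_sketch.json") as f:
    print(f.read())
# "[{\"Event\":\"Machine Error\",\"Error Code\":\"E001\"}]"
```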
src/ui/sidebar.py CHANGED
@@ -50,6 +50,7 @@ def sidebar_ui(state, width=700, visible=True):
             # ["How do I troubleshoot a specific piece of equipment?"],
             # ["What are the best practices for maintaining production efficiency?"]
         ],
+        additional_inputs=[state],
         cache_examples=False
     )
     sessions_state = gr.JSON(
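The `additional_inputs=[state]` line is what forwards the session state into the new `respond(message, history, state)` signature: Gradio appends each additional input after the message and history arguments. A minimal, self-contained sketch of that wiring (assuming the chat widget here is a `gr.ChatInterface`, which the `examples` / `cache_examples` parameters suggest; the real `sidebar_ui()` builds more around it):

```python
import gradio as gr
from src.agent.stream import respond  # async generator: respond(message, history=None, state=None)

with gr.Blocks() as demo:
    state = gr.State({"cycle": 0, "chat": []})  # mirrors the new STATE fields in app.py
    gr.ChatInterface(
        fn=respond,
        type="messages",              # ChatMessage objects with metadata need the messages format
        additional_inputs=[state],    # passed to respond() as the third positional argument
        cache_examples=False,
    )

# demo.launch()
```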