renaming message methods
- ChatAtomicFlow.py +5 -2
- run.py +2 -2
ChatAtomicFlow.py
CHANGED
@@ -394,6 +394,9 @@ class ChatAtomicFlow(AtomicFlow):
         response = self.query_llm(input_data=input_data)


-        reply_message = self.
+        reply_message = self.package_output_message(
+            input_message,
+            response = {"api_output": response}
+        )

-        self.
+        self.send_message(reply_message, is_reply=True)
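For readers of the diff, a minimal self-contained sketch of the renamed output-side pattern follows; only package_output_message and send_message(..., is_reply=True) and their arguments come from the hunk above, while everything else (the stub message class, the fake LLM call, the run signature) is an assumption made so the snippet runs without the aiflows library.

# stand-in message type; the real flow uses the library's own message classes
class StubMessage:
    def __init__(self, data):
        self.data = data

# stand-in flow that mimics only the renamed calls from the hunk above
class StubChatFlow:
    def query_llm(self, input_data):
        return f"echo of {input_data}"  # placeholder for the real LLM call

    def package_output_message(self, input_message, response):
        # the real method presumably wraps `response` into a reply bound to input_message
        return StubMessage(response)

    def send_message(self, message, is_reply=False):
        print("reply sent:", message.data, "is_reply =", is_reply)

    def run(self, input_message):
        response = self.query_llm(input_data=input_message.data)
        # renamed call: package the raw output under the "api_output" key
        reply_message = self.package_output_message(
            input_message,
            response={"api_output": response},
        )
        # renamed call: hand the packaged reply back to the caller
        self.send_message(reply_message, is_reply=True)

StubChatFlow().run(StubMessage({"question": "ping"}))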
run.py
CHANGED
@@ -87,10 +87,10 @@ if __name__ == "__main__":
     )

     #option2: use the proxy_flow
-    #input_message = proxy_flow.
+    #input_message = proxy_flow.package_input_message(data = data)

     #7. ~~~ Run inference ~~~
-    future = proxy_flow.
+    future = proxy_flow.get_reply_future(input_message)

     #uncomment this line if you would like to get the full message back
     #reply_message = future.get_message()