Husnain committed on
💎 [Feature] Enable non-stream return, and prettify logger
networks/openai_streamer.py  CHANGED  +64 -34

@@ -35,7 +35,9 @@ class OpenaiRequester:
         logger.note(f"> {method}:", end=" ")
         logger.mesg(f"{url}", end=" ")

-    def log_response(self, res: requests.Response, stream=False, verbose=False):
+    def log_response(
+        self, res: requests.Response, stream=False, iter_lines=False, verbose=False
+    ):
         status_code = res.status_code
         status_code_str = f"[{status_code}]"

@@ -46,35 +48,41 @@ class OpenaiRequester:

         logger_func(status_code_str)

-        …
+        logger.enter_quiet(not verbose)
+
+        if stream:
+            if not iter_lines:
+                return
+
+            if not hasattr(self, "content_offset"):
+                self.content_offset = 0
+
+            for line in res.iter_lines():
+                line = line.decode("utf-8")
+                line = re.sub(r"^data:\s*", "", line)
+                if re.match(r"^\[DONE\]", line):
+                    logger.success("\n[Finished]")
+                    break
+                line = line.strip()
+                if line:
+                    try:
+                        data = json.loads(line, strict=False)
+                        message_role = data["message"]["author"]["role"]
+                        message_status = data["message"]["status"]
+                        if (
+                            message_role == "assistant"
+                            and message_status == "in_progress"
+                        ):
+                            content = data["message"]["content"]["parts"][0]
+                            delta_content = content[self.content_offset :]
+                            self.content_offset = len(content)
+                            logger_func(delta_content, end="")
+                    except Exception as e:
+                        logger.warn(e)
+        else:
+            logger_func(res.json())
+
+        logger.exit_quiet(not verbose)

     def get_models(self):
         self.log_request(self.api_models)
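
The streaming branch above tails the backend's SSE events purely for logging. A minimal sketch of that parsing, with an illustrative payload (the JSON structure is inferred from the keys the new code reads; the values are made up):

import json
import re

# Illustrative SSE line; only the keys accessed in log_response
# (message.author.role, message.status, message.content.parts) are known.
raw = 'data: {"message": {"author": {"role": "assistant"}, "status": "in_progress", "content": {"parts": ["Hello, wor"]}}}'

line = re.sub(r"^data:\s*", "", raw).strip()
data = json.loads(line, strict=False)
if (
    data["message"]["author"]["role"] == "assistant"
    and data["message"]["status"] == "in_progress"
):
    # Each event appears to carry the full accumulated text, so the real
    # method keeps self.content_offset and prints only the new suffix.
    content_offset = 0
    content = data["message"]["content"]["parts"][0]
    print(content[content_offset:], end="")
    content_offset = len(content)
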
@@ -142,8 +150,7 @@ class OpenaiRequester:
             impersonate="chrome120",
             stream=True,
         )
-
-        self.log_response(res, stream=True, verbose=True)
+        self.log_response(res, stream=True, iter_lines=False)
         return res

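Note that this call site passes iter_lines=False: a streamed response body is single-pass, so if log_response drained res.iter_lines() for logging here, chat_return_generator downstream would receive an already-exhausted stream. A self-contained sketch of that single-pass property (an in-memory stand-in, not code from the repo):

from io import BytesIO

# A streamed HTTP body behaves like this buffer: iterating it drains it.
body = BytesIO(b"data: one\n\ndata: two\n\n")
first_pass = list(body)   # consumes everything, like res.iter_lines()
second_pass = list(body)  # nothing left for a second consumer
assert first_pass and not second_pass
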
@@ -177,7 +184,7 @@ class OpenaiStreamer:
         requester.auth()
         return requester.chat_completions(messages, verbose=False)

-    def chat_return_generator(self, stream_response: requests.Response):
+    def chat_return_generator(self, stream_response: requests.Response, verbose=False):
         content_offset = 0
         is_finished = False

@@ -206,7 +213,8 @@ class OpenaiStreamer:
                     continue
                 delta_content = content[content_offset:]
                 content_offset = len(content)
-
+                if verbose:
+                    logger.success(delta_content, end="")
             else:
                 continue
         except Exception as e:

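With verbose=True the generator now mirrors each delta to the console via logger.success while still yielding its normal chunks. Judging from chat_return_dict below, which parses data["choices"][0]["delta"], each yielded item is a JSON string in OpenAI chat.completion.chunk shape; a hypothetical example:

import json

# Hypothetical chunk: only the choices[0].delta path is confirmed by this diff.
item = '{"choices": [{"index": 0, "delta": {"content": "Hello"}}]}'
delta_content = json.loads(item)["choices"][0]["delta"].get("content", "")
print(delta_content)  # -> Hello
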
@@ -219,3 +227,25 @@ class OpenaiStreamer:

         if not is_finished:
             yield self.message_outputer.output(content="", content_type="Finished")
+
+    def chat_return_dict(self, stream_response: requests.Response):
+        final_output = self.message_outputer.default_data.copy()
+        final_output["choices"] = [
+            {
+                "index": 0,
+                "finish_reason": "stop",
+                "message": {"role": "assistant", "content": ""},
+            }
+        ]
+        final_content = ""
+        for item in self.chat_return_generator(stream_response):
+            try:
+                data = json.loads(item)
+                delta = data["choices"][0]["delta"]
+                delta_content = delta.get("content", "")
+                if delta_content:
+                    final_content += delta_content
+            except Exception as e:
+                logger.warn(e)
+        final_output["choices"][0]["message"]["content"] = final_content.strip()
+        return final_output
|