matthoffner committed
Commit a2f46f0 · 1 Parent(s): 716d802

Update main.py

Files changed (1)
  1. main.py +7 -8
main.py CHANGED
@@ -2,9 +2,8 @@ import fastapi
 import json
 import markdown
 import uvicorn
-from fastapi.responses import HTMLResponse
+from fastapi.responses import StreamingResponse, HTMLResponse
 from fastapi.middleware.cors import CORSMiddleware
-from sse_starlette.sse import EventSourceResponse
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from ctransformers import AutoModelForCausalLM
 from pydantic import BaseModel
@@ -37,10 +36,10 @@ async def chat(prompt = "Once upon a time there was a "):
     completion = llm(prompt)
     async def server_sent_events(chat_chunks):
         for chat_chunk in chat_chunks:
-            yield dict(data=json.dumps(chat_chunk))
-        yield dict(data="[DONE]")
+            yield chat_chunk
+        yield "[DONE]"
 
-    return EventSourceResponse(server_sent_events(completion))
+    return StreamingResponse(server_sent_events(completion))
 
 @app.post("/v1/chat/completions")
 async def chat(request: ChatCompletionRequest, response_mode=None):
@@ -48,10 +47,10 @@ async def chat(request: ChatCompletionRequest, response_mode=None):
     async def server_sent_events(chat_chunks):
         for chat_chunk in chat_chunks:
             print(chat_chunk)
-            yield dict(data=json.dumps(chat_chunk))
-        yield dict(data="[DONE]")
+            yield chat_chunk
+        yield "[DONE]"
 
-    return EventSourceResponse(server_sent_events(completion))
+    return StreamingResponse(server_sent_events(completion))
 
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=8000)
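For context, this commit swaps sse_starlette's EventSourceResponse for FastAPI's built-in StreamingResponse, so the endpoints now emit raw text chunks ending with "[DONE]" instead of SSE "data:" events. A minimal client sketch of how the streamed output could be consumed after this change; the "/chat" path and the "prompt" query parameter are assumptions, since the route decorator for the first handler is outside the diff hunk:

import httpx

# Hypothetical client for the updated streaming endpoint; assumes the server
# from main.py is running locally on port 8000 (as in the __main__ block) and
# that the first chat handler is exposed as GET /chat with a "prompt" query
# parameter -- neither is confirmed by this diff.
with httpx.stream(
    "GET",
    "http://localhost:8000/chat",
    params={"prompt": "Once upon a time there was a "},
    timeout=None,
) as response:
    for chunk in response.iter_text():
        # After this change the body is plain text chunks followed by "[DONE]",
        # rather than JSON-encoded SSE events.
        print(chunk, end="", flush=True)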