Update main.py
main.py CHANGED
@@ -23,17 +23,13 @@ API_KEYS = [
     os.getenv("API_GEMINI_2"),
     os.getenv("API_GEMINI_3"),
     os.getenv("API_GEMINI_4"),
-    os.getenv("API_GEMINI_5")
+    os.getenv("API_GEMINI_5"),
 ]
 
 # Pydantic body-validation classes
-class Message(BaseModel):
-    role: Any
-    content: Any
-
 class ChatCompletionRequest(BaseModel):
     model: str = "gemini-2.0-flash"
-    messages:
+    messages: Optional[Any]
     max_tokens: Optional[int] = 8196
     temperature: Optional[float] = 0.8
     stream: Optional[bool] = False
@@ -125,4 +121,8 @@ async def chat_completions(req: ChatCompletionRequest):
         else:
             return StreamingResponse(_resp_async_generator(req), media_type="application/x-ndjson")
     except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
+        raise HTTPException(status_code=500, detail=str(e))
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)