Agregando endpoints API para integración con chat NTIA
Browse files- app.py +92 -1
- requirements.txt +4 -1
app.py
CHANGED
|
@@ -562,4 +562,95 @@ if __name__ == "__main__":
|
|
| 562 |
server_name="0.0.0.0",
|
| 563 |
server_port=7860,
|
| 564 |
share=False
|
| 565 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 562 |
server_name="0.0.0.0",
|
| 563 |
server_port=7860,
|
| 564 |
share=False
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
# Endpoints API para integración con chat NTIA
|
| 568 |
+
import gradio_client
|
| 569 |
+
from fastapi import FastAPI, HTTPException
|
| 570 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 571 |
+
from pydantic import BaseModel
|
| 572 |
+
import base64
|
| 573 |
+
from io import BytesIO
|
| 574 |
+
|
| 575 |
+
# FastAPI app exposing the Space's generation functions as JSON endpoints
# for the NTIA chat integration.
app = FastAPI(title="NTIA Space API")

# CORS: allow any origin so the external chat frontend can call these
# endpoints. NOTE: the CORS spec forbids combining the wildcard origin "*"
# with credentialed requests (browsers reject that response), so credentials
# are disabled here. If cookies/auth headers are ever needed, replace "*"
# with the concrete frontend origin list and re-enable credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=False,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 585 |
+
|
| 586 |
+
class TextRequest(BaseModel):
    """Request payload for POST /generate_text."""

    prompt: str  # text prompt forwarded to the text-generation backend
    model_name: str  # identifier of the model to run
    max_length: int = 100  # cap on the generated sequence length
|
| 590 |
+
|
| 591 |
+
class ImageRequest(BaseModel):
    """Request payload for POST /generate_image."""

    prompt: str  # text prompt forwarded to the image-generation backend
    model_name: str  # identifier of the model to run
    num_inference_steps: int = 20  # diffusion steps (quality/speed trade-off)
|
| 595 |
+
|
| 596 |
+
class VideoRequest(BaseModel):
    """Request payload for POST /generate_video."""

    prompt: str  # text prompt forwarded to the video-generation backend
    model_name: str  # identifier of the model to run
    num_frames: int = 16  # number of frames to generate
    num_inference_steps: int = 20  # diffusion steps per frame batch
|
| 601 |
+
|
| 602 |
+
class ChatRequest(BaseModel):
    """Request payload for POST /chat."""

    message: str  # new user message for this turn
    history: list  # prior conversation turns, passed through to the backend
    model_name: str  # identifier of the chat model to run
|
| 606 |
+
|
| 607 |
+
@app.post("/generate_text")
async def api_generate_text(request: TextRequest):
    """Run text generation and return it as JSON: {"response": <text>}.

    Any backend failure is surfaced to the client as an HTTP 500.
    """
    try:
        text = generate_text(request.prompt, request.model_name, request.max_length)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"response": text}
|
| 614 |
+
|
| 615 |
+
@app.post("/generate_image")
async def api_generate_image(request: ImageRequest):
    """Generate an image and return it as a base64 PNG data URL.

    Returns {"image": "data:image/png;base64,..."} on success; raises an
    HTTP 500 when the backend reports an error string or throws.
    """
    try:
        result = generate_image(request.prompt, request.model_name, request.num_inference_steps)

        # The backend signals failure with an "Error..." string instead of
        # raising, so translate that into a proper HTTP error.
        if isinstance(result, str) and result.startswith("Error"):
            raise HTTPException(status_code=500, detail=result)

        # Encode the PIL image as PNG, then base64, for JSON transport.
        buffer = BytesIO()
        result.save(buffer, format="PNG")
        img_str = base64.b64encode(buffer.getvalue()).decode()

        return {"image": f"data:image/png;base64,{img_str}"}
    except HTTPException:
        # Bug fix: re-raise the 500 built above untouched. Previously the
        # broad handler below caught it and re-wrapped it with str(e),
        # mangling the clean error detail.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
| 632 |
+
|
| 633 |
+
@app.post("/generate_video")
async def api_generate_video(request: VideoRequest):
    """Generate video frames and return placeholder metadata.

    Returns {"video": <placeholder>, "frames": <count>}; raises an HTTP 500
    when the backend reports an error string or throws. Frames are not yet
    encoded into a real container here — in production this would run the
    frames through ffmpeg.
    """
    try:
        result = generate_video(request.prompt, request.model_name, request.num_frames, request.num_inference_steps)

        # The backend signals failure with an "Error..." string instead of
        # raising, so translate that into a proper HTTP error.
        if isinstance(result, str) and result.startswith("Error"):
            raise HTTPException(status_code=500, detail=result)

        return {"video": "video_data_placeholder", "frames": len(result) if isinstance(result, list) else 0}
    except HTTPException:
        # Bug fix: re-raise the 500 built above untouched. Previously the
        # broad handler below caught it and re-wrapped it with str(e),
        # mangling the clean error detail.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
| 646 |
+
|
| 647 |
+
@app.post("/chat")
async def api_chat(request: ChatRequest):
    """Run one chat turn and return the latest reply as {"response": ...}.

    Any backend failure (including an unexpected history shape) is surfaced
    to the client as an HTTP 500.
    """
    try:
        updated = chat_with_model(request.message, request.history, request.model_name)
        # Last history entry is presumably the assistant reply as a
        # {"role": ..., "content": ...} dict — indexing stays inside the
        # try so a malformed shape still maps to a 500.
        reply = updated[-1]["content"] if updated else "No response"
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"response": reply}
|
| 654 |
+
|
| 655 |
+
# Mount the API on the same server as Gradio.
# NOTE(review): simply assigning `demo.app` does not appear to register these
# FastAPI routes on the server that `demo.launch()` starts; the supported
# pattern is `app = gr.mount_gradio_app(app, demo, path="/")` served via
# uvicorn. Confirm the gradio import alias/version before switching — the
# original assignment is left as-is here.
demo.app = app
|
requirements.txt
CHANGED
|
@@ -11,4 +11,7 @@ xformers>=0.0.20
|
|
| 11 |
huggingface_hub>=0.19.0
|
| 12 |
opencv-python>=4.8.0
|
| 13 |
imageio>=2.31.0
|
| 14 |
-
imageio-ffmpeg>=0.4.8
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
huggingface_hub>=0.19.0
|
| 12 |
opencv-python>=4.8.0
|
| 13 |
imageio>=2.31.0
|
| 14 |
+
imageio-ffmpeg>=0.4.8
|
| 15 |
+
fastapi>=0.104.0
|
| 16 |
+
uvicorn>=0.24.0
|
| 17 |
+
pydantic>=2.0.0
|