# NOTE(review): the three lines here ("Spaces:" / "Running" / "Running") were
# Hugging Face Spaces UI status residue from a copy/paste, not code.
# Stdlib-free module setup: app construction, static/template wiring, and
# one-time model-pipeline initialization.
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from transformers import pipeline

from .config import settings

app = FastAPI(
    title="DeepSeek Chat",
    description="A chat API using DeepSeek model",
    version="1.0.0",
)

# Mount static files and templates (paths are relative to the process CWD,
# which on Spaces is the repo root containing the `app/` package).
app.mount("/static", StaticFiles(directory="app/static"), name="static")
templates = Jinja2Templates(directory="app/templates")

# Initialize the text-generation pipeline once at import time.
# NOTE: this blocks startup while the model downloads/loads.
print("Loading model pipeline...")
pipe = pipeline(
    "text-generation",
    model=settings.MODEL_NAME,
    token=settings.HUGGINGFACE_TOKEN,
    trust_remote_code=True,
)
class ChatMessage(BaseModel):
    """Request body for the chat endpoint."""

    # The user's prompt text.
    message: str
# NOTE(review): the route decorator was absent in the pasted source, leaving
# this handler unregistered. `HTMLResponse` is imported but otherwise unused,
# so this registration is presumably what was intended — confirm against the
# deployed app.
@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
    """Render the chat UI page from the Jinja2 template."""
    return templates.TemplateResponse("chat.html", {"request": request})
# NOTE(review): the route decorator was absent in the pasted source; a POST
# chat endpoint is presumably what was intended — confirm the original path.
@app.post("/chat")
async def chat(message: ChatMessage):
    """Generate a model reply for a single user message.

    Returns:
        dict: ``{"response": <assistant reply text>}``.
    """
    # Single-turn chat-format input for the pipeline.
    messages = [
        {"role": "user", "content": message.message}
    ]

    # The HF text-generation pipeline normally returns a list of dicts keyed
    # by 'generated_text'. With chat-format input, 'generated_text' is the
    # message list (last entry = assistant turn); with plain-string input it
    # is a string.
    response = pipe(messages)

    result = response[0] if isinstance(response, list) else response
    generated = result.get('generated_text', '')

    if isinstance(generated, list):
        # Chat format: extract the final (assistant) message's content.
        # BUG FIX: the original applied [-1]['content'] unconditionally,
        # which raises TypeError when 'generated_text' is a plain string.
        response_text = generated[-1].get('content', '') if generated else ''
    else:
        response_text = generated

    return {"response": response_text}
if __name__ == "__main__":
    # Local/dev entry point; 7860 is the port Hugging Face Spaces expects.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)