# Hugging Face Spaces app (Space build log previously showed: "Runtime error")
import asyncio
import json
import os
import uuid

import gradio as gr
import httpx
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Securely load the Hugging Face token from the environment (a Spaces secret).
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN not found! Set it in Hugging Face Secrets.")

# Model configuration.
MODEL_NAME = "hpyapali/tinyllama-workout"

# In-memory store mapping event_id -> generated AI response (consumed by poll()).
event_store = {}

app = FastAPI()

# Log server restarts so Space rebuilds are visible in the logs.
print("Restarting Hugging Face AI Model Server...")
# Load the AI model once at startup. On failure the app still starts,
# but analyze_workouts() will report that the model is unavailable.
try:
    print("Loading AI Model...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN)
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    print("AI Model Loaded Successfully!")
except Exception as e:
    print(f"Error loading model: {e}")
    pipe = None  # sentinel checked by analyze_workouts()
# AI function - analyzes workout data.
def analyze_workouts(last_workouts: str) -> str:
    """Generate an AI-based workout ranking based on heart-rate recovery.

    Args:
        last_workouts: Free-text description of the recent workouts.

    Returns:
        The model's ranking text, or a human-readable error message.
    """
    # Validate input first so callers get a precise message even when the
    # model failed to load (pipe is None in that case).
    if not last_workouts or not last_workouts.strip():
        return "Error: No workout data provided."
    if pipe is None:
        return "Error: AI model is not loaded."

    instruction = (
        "You are a fitness AI assistant. Rank the following workouts based on heart rate recovery after 2 minutes."
        "\n\n### Ranking Rules:"
        "\n- A **larger heart rate dip** indicates better recovery."
        "\n- If two workouts have the same HR dip, **rank by highest peak HR**."
        "\n\n### Workouts Data:\n"
        f"{last_workouts}"
        "\n\n### Output Format (Rank from best to worst, no explanation, just rankings):"
        "\n1. Best: Running - HR dip: 28 bpm"
        "\n2. Cycling - HR dip: 25 bpm"
        "\n3. Rowing - HR dip: 22 bpm"
        "\n4. Strength Training - HR dip: 18 bpm"
        "\n5. Walking - HR dip: 12 bpm"
        "\n6. Yoga - HR dip: 8 bpm"
    )

    try:
        result = pipe(
            instruction,
            max_new_tokens=250,
            temperature=0.3,        # low temperature: rankings should be near-deterministic
            top_p=0.9,
            do_sample=True,
            return_full_text=False  # exclude the prompt from the generated text
        )
        # Guard against an empty/blank generation.
        if not result or not result[0].get("generated_text", "").strip():
            return "Error: AI did not generate a valid response."
        return result[0]["generated_text"].strip()
    except Exception as e:
        return f"Error generating workout recommendation: {str(e)}"
# API route handler for processing workout data.
# NOTE(review): no @app.post decorator is visible in this source, so this
# handler is never registered on `app` here — confirm routing elsewhere.
async def process_workout_request(request: Request):
    """Analyze a workout payload and optionally notify a webhook.

    Expects JSON: {"data": [<workout text>, ...], "webhook_url": <optional str>}.

    Returns:
        {"event_id": <uuid>}; the result can later be fetched via poll().

    Raises:
        HTTPException: 400 on a malformed payload, 500 on unexpected errors.
    """
    try:
        req_body = await request.json()
        print("RAW REQUEST FROM HF:", req_body)

        # Validate shape before indexing; an empty list would otherwise
        # IndexError and surface as an opaque 500.
        if "data" not in req_body or not isinstance(req_body["data"], list) or not req_body["data"]:
            raise HTTPException(status_code=400, detail="Invalid request format: 'data' must be a list.")

        last_workouts = req_body["data"][0]
        event_id = str(uuid.uuid4())
        print(f"Processing AI Request - Event ID: {event_id}")

        response_text = analyze_workouts(last_workouts)
        event_store[event_id] = response_text

        # Best-effort webhook notification; the response stays pollable either way.
        webhook_url = req_body.get("webhook_url")
        if webhook_url:
            print(f"Sending response to Webhook: {webhook_url}")
            async with httpx.AsyncClient() as client:
                await client.post(webhook_url, json={"event_id": event_id, "data": [response_text]})

        return {"event_id": event_id}
    except HTTPException:
        # Bug fix: re-raise our own 400 instead of letting the generic
        # handler below collapse it into a 500.
        raise
    except Exception as e:
        print(f"Error processing request: {e}")
        raise HTTPException(status_code=500, detail=str(e))
# Polling API (fallback when the webhook delivery fails).
async def poll(event_id: str):
    """Return and remove the stored AI response for *event_id*.

    pop() makes each result retrievable exactly once; a second poll for the
    same id returns the not-found payload.
    """
    if event_id in event_store:
        return {"data": [event_store.pop(event_id)]}
    return {"detail": "Not Found"}
# Webhook receiver (useful for debugging webhook calls end-to-end).
async def receive_webhook(request: Request):
    """Echo back a received webhook payload.

    Returns an {"error": ...} dict instead of raising so a malformed body
    never produces a 500 during debugging.
    """
    try:
        req_body = await request.json()
        print("Webhook Received:", req_body)
        return {"status": "success", "received": req_body}
    except Exception as e:
        return {"error": str(e)}
# Health check.
async def root():
    """Liveness endpoint: confirms the service is up."""
    return {"message": "Workout Analysis & Ranking AI is running!"}
# Gradio UI for manually exercising the analysis function.
iface = gr.Interface(
    fn=analyze_workouts,
    inputs="text",
    outputs="text",
    title="Workout Analysis & Ranking AI",
    description="Enter workout data to analyze effectiveness, rank workouts, and receive improvement recommendations."
)
# Start both FastAPI & Gradio.
def start_gradio():
    """Launch the Gradio UI (blocking; intended to run in its own thread)."""
    iface.launch(server_name="0.0.0.0", server_port=7860, share=True)
def start_fastapi():
    """Serve the FastAPI app (blocking) on a separate port from Gradio."""
    uvicorn.run(app, host="0.0.0.0", port=7861)
# Run both servers in parallel.
if __name__ == "__main__":
    import threading

    # Non-daemon threads keep the process alive after the main thread returns.
    threading.Thread(target=start_gradio).start()
    threading.Thread(target=start_fastapi).start()