# Hugging Face Space: workout-ranking AI service.
# NOTE: the original "Spaces:" / "Runtime error" lines here were status text
# scraped from the Space's web page, not program code; kept as this comment.
import os | |
import json | |
import uuid | |
import httpx | |
import gradio as gr | |
import torch | |
from fastapi import FastAPI, HTTPException, Request | |
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM | |
import uvicorn | |
import asyncio | |
# Halve default precision so the model fits within the Space's memory limits.
torch.set_default_dtype(torch.float16)

# Hugging Face access token, read from the environment (never hard-coded).
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_NAME = "hpyapali/tinyllama-workout"

# In-memory map of event-id -> AI response, used by the polling fallback.
event_store = {}

# Vapor webhook server that receives fine-tune callbacks.
WEBHOOK_URL = "https://694a-50-35-76-93.ngrok-free.app/fineTuneModel"

app = FastAPI()

# The text-generation pipeline is created lazily on first use to avoid
# startup timeouts on Hugging Face Spaces.
pipe = None
def get_pipeline():
    """Return the lazily-initialised text-generation pipeline.

    Loads tokenizer and model on the first call and caches the pipeline in
    the module-global ``pipe``. Returns ``None`` if loading fails.
    """
    global pipe
    if pipe is not None:
        return pipe
    try:
        print("π Loading AI Model...")
        tok = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
        lm = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            token=HF_TOKEN,
            torch_dtype=torch.float16,  # lower memory usage
            device_map="auto",          # place on whatever device is available
        )
        pipe = pipeline("text-generation", model=lm, tokenizer=tok)
        print("β AI Model Loaded Successfully!")
    except Exception as e:
        # Loading is best-effort: log and leave the pipeline unset so callers
        # can degrade gracefully instead of crashing the server.
        print(f"β Error loading model: {e}")
        pipe = None
    return pipe
# β AI Function - Processes and ranks workouts
def analyze_workouts(last_workouts: str) -> str:
    """Rank the given workouts by heart-rate recovery using the LLM.

    Args:
        last_workouts: Raw workout summaries (one per line) to be ranked.

    Returns:
        The model's ranking text, or a human-readable error string when the
        model is unavailable, the input is empty, or generation fails.
    """
    pipe = get_pipeline()
    if pipe is None:
        print("β AI model is not loaded.")
        return "β AI model not loaded."
    if not last_workouts.strip():
        print("β Empty workout data received!")
        return "β No workout data provided."
    instruction = (
        "You are a fitness AI assistant. Rank these workouts by heart rate recovery:"
        f"\n\n{last_workouts}\n\nOnly return rankings. No extra text."
    )
    print(f"π¨ Sending prompt to AI: {instruction}")
    try:
        # BUG FIX: text-generation pipelines echo the prompt by default, so
        # callers previously received the instruction text glued onto the
        # rankings. return_full_text=False keeps only the new completion.
        # BUG FIX: temperature/top_p are ignored unless do_sample=True.
        result = pipe(
            instruction,
            max_new_tokens=200,
            do_sample=True,
            temperature=0.3,
            top_p=0.9,
            return_full_text=False,
        )
        if not result or "generated_text" not in result[0]:
            print("β AI response is empty or malformed!")
            return "β AI did not return a valid response."
        response_text = result[0]["generated_text"].strip()
        print(f"π AI Response: {response_text}")
        return response_text
    except Exception as e:
        print(f"β AI Error: {str(e)}")
        return f"β Error: {str(e)}"
# β API Route for Processing Workout Data
async def process_workout_request(request: Request):
    """Validate a workout payload, run the ranking AI, and store the result.

    Expects a JSON body of the form ``{"data": ["<workout text>"]}``.
    Returns ``{"event_id": "<uuid>"}``; the response text (if any) is kept in
    ``event_store`` under that id for the polling endpoint.

    Raises:
        HTTPException: 400 for a malformed/empty payload, 500 for
            unexpected internal failures.
    """
    try:
        req_body = await request.json()
        print("π© RAW REQUEST FROM HF:", req_body)
        # Also reject an empty list: indexing [0] below would otherwise raise
        # IndexError and surface as a 500 instead of a client error.
        if (
            "data" not in req_body
            or not isinstance(req_body["data"], list)
            or not req_body["data"]
        ):
            raise HTTPException(status_code=400, detail="Invalid request format.")
        last_workouts = req_body["data"][0]
        event_id = str(uuid.uuid4())
        print(f"β Processing AI Request - Event ID: {event_id}")
        response_text = analyze_workouts(last_workouts)
        if response_text and response_text.strip():
            event_store[event_id] = response_text
            print(f"π Stored event: {event_id} β {response_text}")
        else:
            print("β AI did not generate a valid response. Not storing event.")
        return {"event_id": event_id}
    except HTTPException:
        # BUG FIX: the broad handler below used to swallow the intended 400
        # validation error and convert it into a generic 500. Re-raise as-is.
        raise
    except Exception as e:
        print(f"β Error processing request: {e}")
        raise HTTPException(status_code=500, detail=str(e))
# β Polling Endpoint (If Webhook Fails)
async def poll(event_id: str):
    """Fetches stored AI response for a given event ID."""
    print(f"π Polling event ID: {event_id}")
    # Guard clause: unknown ids fall through to a not-found payload.
    if event_id not in event_store:
        print("β Event ID not found in event_store")
        return {"detail": "Not Found"}
    print(f"β Returning stored response: {event_store[event_id]}")
    # pop() makes delivery one-shot: each stored response is consumed once.
    return {"data": [event_store.pop(event_id)]}
# β Health Check
async def root():
    """Liveness probe: confirms the service process is up and responding."""
    return dict(message="Workout Analysis & Ranking AI is running!")
# β Start FastAPI
if __name__ == "__main__":
    # Bind to all interfaces; port 7861 matches the Space's configuration.
    uvicorn.run(app, host="0.0.0.0", port=7861)