hpyapali committed
Commit da62dd2 · verified · 1 Parent(s): 2fe9c1b

Update app.py

Files changed (1)
  1. app.py +25 -80
app.py CHANGED
@@ -8,23 +8,15 @@ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import uvicorn
 import asyncio
 
-# ✅ Securely Load Hugging Face Token
-HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise ValueError("❌ HF_TOKEN not found! Set it in Hugging Face Secrets.")
-
 # ✅ Load Model Configuration
+HF_TOKEN = os.getenv("HF_TOKEN")
 MODEL_NAME = "hpyapali/tinyllama-workout"
-event_store = {}  # Store AI responses with event_id
+event_store = {}
 
 app = FastAPI()
 
-# ✅ Log server restart
-print("🔄 Restarting Hugging Face AI Model Server...")
-
 # ✅ Load AI Model
 try:
-    print("🔄 Loading AI Model...")
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN)
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
@@ -34,7 +26,7 @@ except Exception as e:
     pipe = None
 
 
-# ✅ AI Function - Analyzes workout data
+# ✅ AI Function - Processes and ranks workouts
 def analyze_workouts(last_workouts: str):
     """Generates AI-based workout rankings based on heart rate recovery."""
     if pipe is None:
@@ -44,38 +36,16 @@ def analyze_workouts(last_workouts: str):
         return "❌ No workout data provided."
 
     instruction = (
-        "You are a fitness AI assistant. Rank the following workouts based on heart rate recovery after 2 minutes."
-        "\n\n### Ranking Rules:"
-        "\n- A **larger heart rate dip** indicates better recovery."
-        "\n- If two workouts have the same HR dip, **rank by highest peak HR**."
-        "\n\n### Workouts Data:\n"
-        f"{last_workouts}"
-        "\n\n### Output Format (Rank from best to worst, no explanation, just rankings):"
-        "\n1. Best: Running - HR dip: 28 bpm"
-        "\n2. Cycling - HR dip: 25 bpm"
-        "\n3. Rowing - HR dip: 22 bpm"
-        "\n4. Strength Training - HR dip: 18 bpm"
-        "\n5. Walking - HR dip: 12 bpm"
-        "\n6. Yoga - HR dip: 8 bpm"
+        "You are a fitness AI assistant. Rank these workouts by heart rate recovery:"
+        f"\n\n{last_workouts}\n\nOnly return rankings. No extra text."
     )
 
     try:
-        result = pipe(
-            instruction,
-            max_new_tokens=250,
-            temperature=0.3,
-            top_p=0.9,
-            do_sample=True,
-            return_full_text=False
-        )
-
-        if not result or not result[0].get("generated_text", "").strip():
-            return "❌ AI did not generate a valid response."
-
-        return result[0]["generated_text"].strip()
-
+        result = pipe(instruction, max_new_tokens=200, temperature=0.3, top_p=0.9)
+        response_text = result[0]["generated_text"].strip()
+        return response_text
     except Exception as e:
-        return f"❌ Error generating workout recommendation: {str(e)}"
+        return f"❌ Error: {str(e)}"
 
 
 # ✅ API Route for Processing Workout Data
@@ -86,22 +56,28 @@ async def process_workout_request(request: Request):
         print("📩 RAW REQUEST FROM HF:", req_body)
 
         if "data" not in req_body or not isinstance(req_body["data"], list):
-            raise HTTPException(status_code=400, detail="Invalid request format: 'data' must be a list.")
+            raise HTTPException(status_code=400, detail="Invalid request format.")
 
         last_workouts = req_body["data"][0]
         event_id = str(uuid.uuid4())
-
         print(f"✅ Processing AI Request - Event ID: {event_id}")
 
         response_text = analyze_workouts(last_workouts)
 
+        # ✅ Store response for polling fallback
         event_store[event_id] = response_text
 
-        webhook_url = req_body.get("webhook_url")
-        if webhook_url:
-            print(f"📡 Sending response to Webhook: {webhook_url}")
-            async with httpx.AsyncClient() as client:
-                await client.post(webhook_url, json={"event_id": event_id, "data": [response_text]})
+        # ✅ Send AI response to Vapor Webhook
+        webhook_url = "https://694a-50-35-76-93.ngrok-free.app/fineTuneModel"
+
+        async with httpx.AsyncClient() as client:
+            try:
+                webhook_response = await client.post(webhook_url, json={"event_id": event_id, "data": [response_text]})
+                webhook_response.raise_for_status()
+                print(f"✅ Webhook sent successfully: {webhook_response.json()}")
+            except Exception as e:
+                print(f"⚠️ Webhook failed: {e}")
+                print("🔄 Switching to Polling Mode...")
 
         return {"event_id": event_id}
 
@@ -110,7 +86,7 @@ async def process_workout_request(request: Request):
         raise HTTPException(status_code=500, detail=str(e))
 
 
-# ✅ Polling API (If Webhook Fails)
+# ✅ Polling Endpoint (If Webhook Fails)
 @app.get("/gradio_api/poll/{event_id}")
 async def poll(event_id: str):
     """Fetches stored AI response for a given event ID."""
@@ -119,43 +95,12 @@ async def poll(event_id: str):
     return {"detail": "Not Found"}
 
 
-# ✅ Webhook Receiver (For Debugging Webhook Calls)
-@app.post("/fineTuneModel")
-async def receive_webhook(request: Request):
-    """Handles webhook responses (useful for debugging webhook calls)."""
-    try:
-        req_body = await request.json()
-        print("📩 Webhook Received:", req_body)
-        return {"status": "success", "received": req_body}
-    except Exception as e:
-        return {"error": str(e)}
-
-
 # ✅ Health Check
 @app.get("/")
 async def root():
     return {"message": "Workout Analysis & Ranking AI is running!"}
 
 
-# ✅ Gradio UI for Testing
-iface = gr.Interface(
-    fn=analyze_workouts,
-    inputs="text",
-    outputs="text",
-    title="Workout Analysis & Ranking AI",
-    description="Enter workout data to analyze effectiveness, rank workouts, and receive improvement recommendations."
-)
-
-
-# ✅ Start Both FastAPI & Gradio
-def start_gradio():
-    iface.launch(server_name="0.0.0.0", server_port=7860, share=True)
-
-def start_fastapi():
-    uvicorn.run(app, host="0.0.0.0", port=7861)
-
-# ✅ Run both servers in parallel
+# ✅ Start FastAPI
 if __name__ == "__main__":
-    import threading
-    threading.Thread(target=start_gradio).start()
-    threading.Thread(target=start_fastapi).start()
+    uvicorn.run(app, host="0.0.0.0", port=7861)
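
For reference, a minimal polling-client sketch against the endpoints kept by this commit. Assumptions: the app is reachable at http://localhost:7861 (the port passed to uvicorn.run above), the event_id comes from the workout-processing response, and poll_result is a hypothetical helper name, not part of app.py:

import httpx

BASE_URL = "http://localhost:7861"  # assumption: local deployment on the uvicorn port used above

def poll_result(event_id: str):
    """Fetch the stored AI response for a given event ID via the polling endpoint."""
    resp = httpx.get(f"{BASE_URL}/gradio_api/poll/{event_id}")
    resp.raise_for_status()
    # Returns the stored ranking for a known event_id, or {"detail": "Not Found"} otherwise.
    return resp.json()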