hpyapali committed on
Commit 12d03e0 · verified · 1 Parent(s): 9477700

Update app.py

Files changed (1)
app.py +19 -39
app.py CHANGED
@@ -46,13 +46,14 @@ def get_pipeline():
 
 
 # ✅ AI Function - Processes and ranks workouts
-
 def analyze_workouts(last_workouts: str):
     pipe = get_pipeline()
     if pipe is None:
-        return "❌ AI model is not loaded."
+        print("❌ AI model is not loaded.")
+        return "❌ AI model not loaded."
 
     if not last_workouts.strip():
+        print("❌ Empty workout data received!")
         return "❌ No workout data provided."
 
     instruction = (
@@ -60,7 +61,7 @@ def analyze_workouts(last_workouts: str):
         f"\n\n{last_workouts}\n\nOnly return rankings. No extra text."
     )
 
-    print(f"📨 Sending prompt to AI: {instruction}")  # ✅ Debug log
+    print(f"📨 Sending prompt to AI: {instruction}")
 
     try:
         result = pipe(instruction, max_new_tokens=200, temperature=0.3, top_p=0.9)
@@ -69,11 +70,11 @@ def analyze_workouts(last_workouts: str):
             return "❌ AI did not return a valid response."
 
         response_text = result[0]["generated_text"].strip()
-        print(f"🔍 AI Response: {response_text}")  # ✅ Debug log
+        print(f"🔍 AI Response: {response_text}")
 
         return response_text
     except Exception as e:
-        print(f"❌ AI Error: {str(e)}")  # ✅ Debug AI errors
+        print(f"❌ AI Error: {str(e)}")
         return f"❌ Error: {str(e)}"
 
 
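analyze_workouts() depends on get_pipeline(), which is defined earlier in app.py and only appears as context in this diff. A minimal sketch of what such a lazily cached transformers text-generation pipeline typically looks like; the model name below is a placeholder, not the one this Space actually loads:

# Sketch only - the real get_pipeline() in app.py may differ.
from transformers import pipeline

MODEL_ID = "some-org/some-instruct-model"  # placeholder; the actual model is not shown in this diff
_pipe = None

def get_pipeline():
    """Load the text-generation pipeline once, cache it, and return None if loading fails."""
    global _pipe
    if _pipe is None:
        try:
            _pipe = pipeline("text-generation", model=MODEL_ID)
        except Exception as e:
            print(f"❌ Failed to load model: {e}")
            return None
    return _pipe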
 
@@ -93,20 +94,13 @@ async def process_workout_request(request: Request):
 
         response_text = analyze_workouts(last_workouts)
 
-        # ✅ Store response for polling fallback
-        event_store[event_id] = response_text
-
-        # ✅ Send AI response to Vapor Webhook
-        async with httpx.AsyncClient() as client:
-            try:
-                webhook_response = await client.post(WEBHOOK_URL, json={"event_id": event_id, "data": [response_text]})
-                webhook_response.raise_for_status()
-                print(f"✅ Webhook sent successfully: {webhook_response.json()}")
-            except Exception as e:
-                print(f"⚠️ Webhook failed: {e}")
-                print("🔄 Switching to Polling Mode...")
+        if response_text and response_text.strip():
+            event_store[event_id] = response_text
+            print(f"📝 Stored event: {event_id} → {response_text}")
+        else:
+            print("❌ AI did not generate a valid response. Not storing event.")
 
-        return {"event_id": event_id}
+        return {"event_id": event_id}
 
     except Exception as e:
         print(f"❌ Error processing request: {e}")
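
This hunk drops the webhook push to WEBHOOK_URL; responses are now kept only in event_store for the polling endpoint below. event_store itself is not part of this diff, but it is presumably a plain module-level dict, along these lines:

# Assumption: an in-memory map of event IDs to AI responses, defined near the top of app.py.
# Being per-process, it is emptied on every restart and is not shared across workers.
event_store: dict = {}
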
@@ -117,8 +111,13 @@ async def process_workout_request(request: Request):
 @app.get("/gradio_api/poll/{event_id}")
 async def poll(event_id: str):
     """Fetches stored AI response for a given event ID."""
+    print(f"🔍 Polling event ID: {event_id}")
+
     if event_id in event_store:
+        print(f"✅ Returning stored response: {event_store[event_id]}")
         return {"data": [event_store.pop(event_id)]}
+
+    print("❌ Event ID not found in event_store")
     return {"detail": "Not Found"}
 
 
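With the push removed, a caller submits the workout data and then polls /gradio_api/poll/{event_id} until the stored response shows up. A rough client-side sketch using httpx; the POST path and payload shape are assumptions, since the route that wraps process_workout_request() is not visible in this diff:

import asyncio
import httpx

async def fetch_ranking(base_url: str, last_workouts: str) -> str:
    async with httpx.AsyncClient() as client:
        # Hypothetical submit path and body - the real route is defined elsewhere in app.py.
        submit = await client.post(f"{base_url}/gradio_api/workouts",
                                   json={"last_workouts": last_workouts})
        event_id = submit.json()["event_id"]

        # Poll until the server has stored the AI response for this event ID.
        for _ in range(30):
            reply = await client.get(f"{base_url}/gradio_api/poll/{event_id}")
            body = reply.json()
            if "data" in body:
                return body["data"][0]
            await asyncio.sleep(2)
    return "❌ Timed out waiting for the AI response."

# Example: asyncio.run(fetch_ranking("http://localhost:7861", "Run 5k in 25min\nBench press 3x8 @ 60kg"))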
 
@@ -128,25 +127,6 @@ async def root():
     return {"message": "Workout Analysis & Ranking AI is running!"}
 
 
-# ✅ Gradio UI for Testing
-iface = gr.Interface(
-    fn=analyze_workouts,
-    inputs="text",
-    outputs="text",
-    title="Workout Analysis & Ranking AI",
-    description="Enter workout data to analyze effectiveness, rank workouts, and receive improvement recommendations."
-)
-
-
-# ✅ Start Both FastAPI & Gradio
-def start_gradio():
-    iface.launch(server_name="0.0.0.0", server_port=7860, share=True)
-
-def start_fastapi():
-    uvicorn.run(app, host="0.0.0.0", port=7861)
-
-# ✅ Run both servers in parallel
+# ✅ Start FastAPI
 if __name__ == "__main__":
-    import threading
-    threading.Thread(target=start_gradio).start()
-    threading.Thread(target=start_fastapi).start()
+    uvicorn.run(app, host="0.0.0.0", port=7861)
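
This last hunk removes the Gradio test UI and the threaded dual-server startup, leaving uvicorn on port 7861 as the single entry point. The remaining FastAPI routes can still be smoke-tested without any UI, for example with FastAPI's TestClient; the sketch below assumes app is importable from app.py and that root() is mounted at "/":

# Minimal smoke test for the routes kept by this commit (assumes `from app import app` works).
from fastapi.testclient import TestClient
from app import app

client = TestClient(app)

def test_routes():
    # Root endpoint, assuming root() is registered at "/".
    assert client.get("/").json() == {"message": "Workout Analysis & Ranking AI is running!"}
    # Unknown event IDs fall through to the "Not Found" payload returned by poll().
    assert client.get("/gradio_api/poll/missing-id").json() == {"detail": "Not Found"}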
 