github-actions[bot] committed on
Commit
3a07545
·
1 Parent(s): b52102b

🤖 Auto-deploy from GitHub (push) - 7eb8444 - 2025-07-27 15:36:09 UTC

Browse files
Files changed (33) hide show
  1. .env.example +1 -1
  2. README.md +20 -95
  3. README_HF.md +32 -0
  4. apps/gradio-app/README.md +24 -0
  5. apps/gradio-app/requirements.txt +89 -0
  6. apps/gradio-app/src/fitness_gradio/__init__.py +9 -0
  7. apps/gradio-app/src/fitness_gradio/examples/__init__.py +10 -0
  8. apps/gradio-app/src/fitness_gradio/examples/demo.py +134 -0
  9. apps/gradio-app/src/fitness_gradio/main.py +52 -0
  10. apps/gradio-app/src/fitness_gradio/ui/__init__.py +13 -0
  11. apps/gradio-app/src/fitness_gradio/ui/app.py +123 -0
  12. apps/gradio-app/src/fitness_gradio/ui/components.py +139 -0
  13. apps/gradio-app/src/fitness_gradio/ui/handlers.py +384 -0
  14. apps/gradio-app/src/fitness_gradio/ui/styles.py +162 -0
  15. fitness_agent/__init__.py +7 -0
  16. fitness_agent/app.py +60 -1261
  17. fitness_agent/fitness_agent.py +80 -387
  18. requirements.txt +10 -4
  19. shared/README.md +20 -0
  20. shared/requirements.txt +59 -0
  21. shared/src/fitness_core/__init__.py +30 -0
  22. shared/src/fitness_core/agents/__init__.py +15 -0
  23. shared/src/fitness_core/agents/base.py +109 -0
  24. shared/src/fitness_core/agents/models.py +34 -0
  25. shared/src/fitness_core/agents/providers.py +298 -0
  26. shared/src/fitness_core/services/__init__.py +24 -0
  27. shared/src/fitness_core/services/agent_runner.py +246 -0
  28. shared/src/fitness_core/services/conversation.py +93 -0
  29. shared/src/fitness_core/services/exceptions.py +28 -0
  30. shared/src/fitness_core/services/formatters.py +206 -0
  31. shared/src/fitness_core/utils/__init__.py +11 -0
  32. shared/src/fitness_core/utils/config.py +79 -0
  33. shared/src/fitness_core/utils/logging.py +63 -0
.env.example CHANGED
@@ -14,7 +14,7 @@ ANTHROPIC_API_KEY=your_anthropic_key_here
14
  # OPENAI_API_KEY=your_openai_api_key_here
15
  # ANTHROPIC_API_KEY=your_anthropic_key_here
16
 
17
- # Optional: Set default model (will use claude-3.5-haiku if not set)
18
  # AI_MODEL=gpt-4o-mini
19
  # AI_MODEL=claude-3.5-sonnet
20
  # AI_MODEL=gpt-4o
 
14
  # OPENAI_API_KEY=your_openai_api_key_here
15
  # ANTHROPIC_API_KEY=your_anthropic_key_here
16
 
17
+ # Optional: Set default model (will use gpt-4o-mini if not set)
18
  # AI_MODEL=gpt-4o-mini
19
  # AI_MODEL=claude-3.5-sonnet
20
  # AI_MODEL=gpt-4o
README.md CHANGED
@@ -1,107 +1,32 @@
1
- ---
2
- title: Fitness AI Assistant
3
- emoji: 🏋️‍♀️
4
- colorFrom: blue
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 5.38.1
8
- app_file: fitness_agent/app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- # 🏋️‍♀️ Fitness AI Assistant
14
-
15
- Your personal fitness companion for workout plans, meal planning, and fitness guidance powered by **multiple AI providers** - choose between Anthropic Claude and OpenAI GPT models!
16
-
17
- ## ✨ Features
18
-
19
- - **🏋️ Personalized Workout Plans**: Custom routines based on your fitness level and goals
20
- - **🥗 Meal Planning**: Tailored nutrition plans for weight loss, muscle gain, or general health
21
- - **💡 Fitness Guidance**: Expert advice on exercises, form, and best practices
22
- - **🤖 Multiple AI Providers**: Choose from Anthropic Claude OR OpenAI GPT models
23
- - **⚡ Model Flexibility**: Switch between models anytime for different capabilities
24
- - **💬 Interactive Chat**: Conversational interface with memory and context
25
- - **🔄 Real-time Streaming**: See responses generated live
26
-
27
- ## 🤖 Supported AI Models
28
-
29
- ### 🔵 Anthropic Claude Models
30
- - **Claude-4**: claude-4-opus, claude-4-sonnet (Premium, most capable)
31
- - **Claude-3.7**: claude-3.7-sonnet (Extended thinking)
32
- - **Claude-3.5**: claude-3.5-sonnet, claude-3.5-haiku (Balanced, fast)
33
- - **Claude-3**: claude-3-haiku (Cost-effective)
34
 
35
- ### 🟢 OpenAI GPT Models
36
- - **GPT-4o**: gpt-4o, gpt-4o-mini (Latest with vision)
37
- - **GPT-4**: gpt-4-turbo (Large context window)
38
- - **GPT-3.5**: gpt-3.5-turbo (Fast and economical)
39
- - **Reasoning**: o1-preview, o1-mini, o3-mini (Advanced reasoning)
40
 
41
- ## 🚀 Quick Start
42
 
43
- ### Option 1: Run Locally
44
- ```bash
45
- # Clone the repository
46
- git clone <your-repo-url>
47
- cd fitness-app
48
 
49
- # Install dependencies
50
- pip install -r requirements.txt
51
 
52
- # Set up environment - choose your provider(s)
53
- cp .env.example .env
 
 
54
 
55
- # Edit .env and add your API key(s):
56
- # For OpenAI models: OPENAI_API_KEY=your_openai_key_here
57
- # For Anthropic models: ANTHROPIC_API_KEY=your_anthropic_key_here
58
- # For both providers: Set both keys!
59
 
60
- # Launch the app
61
- python fitness_agent/app.py
62
- ```
 
63
 
64
- ### Option 2: Use the Interface
65
- 1. **Select your AI provider and model** from the dropdown
66
- - 🔵 Anthropic models for detailed analysis and safety
67
- - 🟢 OpenAI models for familiar interface and vision capabilities
68
- 2. **Start chatting** about your fitness goals
69
- 3. **Be specific** about your level, equipment, and preferences
70
- 4. **Get personalized plans** and ask follow-up questions
71
 
72
- ## 🎯 Example Prompts
73
-
74
- - "Create a beginner workout plan for me"
75
- - "I want to lose weight - help me with a fitness plan"
76
- - "Design a muscle building program for intermediate level"
77
- - "I need a meal plan for gaining muscle mass"
78
- - "Help me with a home workout routine with no equipment"
79
-
80
- ## 🤖 AI Model Options
81
-
82
- | Model | Speed | Capability | Best For |
83
- |-------|--------|------------|----------|
84
- | claude-3.5-haiku | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | Quick questions, cost-effective (default) |
85
- | claude-3.5-sonnet | ⭐⭐⭐⭐ | ⭐⭐⭐⭐ | Balanced performance, recommended |
86
- | claude-3.7-sonnet | ⭐⭐⭐⭐ | ⭐⭐⭐⭐ | Extended thinking, complex plans |
87
- | claude-4-sonnet | ⭐⭐⭐⭐ | ⭐⭐⭐⭐⭐ | High performance (premium) |
88
- | claude-4-opus | ⭐⭐⭐ | ⭐⭐⭐⭐⭐ | Maximum capability (premium) |
89
-
90
- ## 📚 Documentation
91
-
92
- - **[Complete Model Guide](fitness_agent/COMPLETE_MODEL_GUIDE.md)** - Everything about AI models
93
- - **[Examples](fitness_agent/examples.py)** - Code examples for different use cases
94
- - **[Test Script](fitness_agent/test_updated_models.py)** - Test model availability
95
-
96
- ## 🛠️ Tech Stack
97
-
98
- - **Backend**: Python, LiteLLM, Anthropic API
99
- - **Frontend**: Gradio
100
- - **AI Models**: Anthropic Claude (3.5-Haiku to 4-Opus)
101
- - **Features**: Real-time streaming, conversation memory, model switching
102
 
103
  ---
104
 
105
- *Built with ❤️ for fitness enthusiasts*
106
-
107
- *Start your fitness journey today with personalized AI guidance!*
 
1
+ # Fitness AI Assistant
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
+ Your personal AI-powered fitness and nutrition coach built with Gradio.
 
 
 
 
4
 
5
+ ## Features
6
 
7
+ - **Personalized Fitness Plans**: Get customized workout routines based on your goals and fitness level
8
+ - **Nutrition Guidance**: Receive tailored dietary advice and meal planning suggestions
9
+ - **Progress Tracking**: Monitor your fitness journey with AI-powered insights
10
+ - **Expert Knowledge**: Access evidence-based fitness and nutrition information
 
11
 
12
+ ## How to Use
 
13
 
14
+ 1. Start a conversation by describing your fitness goals
15
+ 2. Ask questions about workouts, nutrition, or health
16
+ 3. Get personalized recommendations and guidance
17
+ 4. Follow up with specific questions for detailed advice
18
 
19
+ ## Technology
 
 
 
20
 
21
+ This app is built using:
22
+ - **Gradio** for the web interface
23
+ - **OpenAI GPT models** for intelligent conversations
24
+ - **Custom fitness knowledge base** for specialized guidance
25
 
26
+ ## Getting Started
 
 
 
 
 
 
27
 
28
+ Simply type your fitness-related question or goal in the chat interface below!
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  ---
31
 
32
+ *Note: This AI assistant provides general fitness and nutrition guidance. Always consult with healthcare professionals for medical advice or before starting new exercise programs.*
 
 
README_HF.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Fitness AI Assistant
2
+
3
+ Your personal AI-powered fitness and nutrition coach built with Gradio.
4
+
5
+ ## Features
6
+
7
+ - **Personalized Fitness Plans**: Get customized workout routines based on your goals and fitness level
8
+ - **Nutrition Guidance**: Receive tailored dietary advice and meal planning suggestions
9
+ - **Progress Tracking**: Monitor your fitness journey with AI-powered insights
10
+ - **Expert Knowledge**: Access evidence-based fitness and nutrition information
11
+
12
+ ## How to Use
13
+
14
+ 1. Start a conversation by describing your fitness goals
15
+ 2. Ask questions about workouts, nutrition, or health
16
+ 3. Get personalized recommendations and guidance
17
+ 4. Follow up with specific questions for detailed advice
18
+
19
+ ## Technology
20
+
21
+ This app is built using:
22
+ - **Gradio** for the web interface
23
+ - **OpenAI GPT models** for intelligent conversations
24
+ - **Custom fitness knowledge base** for specialized guidance
25
+
26
+ ## Getting Started
27
+
28
+ Simply type your fitness-related question or goal in the chat interface below!
29
+
30
+ ---
31
+
32
+ *Note: This AI assistant provides general fitness and nutrition guidance. Always consult with healthcare professionals for medical advice or before starting new exercise programs.*
apps/gradio-app/README.md ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Fitness Gradio App
2
+
3
+ Web interface for the Fitness AI Assistant using Gradio.
4
+
5
+ ## Features
6
+
7
+ - Interactive chat interface
8
+ - Model selection (OpenAI/Anthropic)
9
+ - Real-time streaming responses
10
+ - Fitness plan generation
11
+ - Mobile-friendly design
12
+
13
+ ## Running the App
14
+
15
+ ```bash
16
+ poetry install
17
+ poetry run fitness-gradio
18
+ ```
19
+
20
+ Or:
21
+
22
+ ```bash
23
+ poetry run python -m fitness_gradio.main
24
+ ```
apps/gradio-app/requirements.txt ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -e file:///C:/Users/sdeer/OneDrive/Projects/fitness-app/shared ; python_version >= "3.12" and python_version < "4.0"
2
+ aiofiles==24.1.0 ; python_version >= "3.12" and python_version < "4.0"
3
+ aiohappyeyeballs==2.6.1 ; python_version >= "3.12" and python_version < "4.0"
4
+ aiohttp==3.12.14 ; python_version >= "3.12" and python_version < "4.0"
5
+ aiosignal==1.4.0 ; python_version >= "3.12" and python_version < "4.0"
6
+ annotated-types==0.7.0 ; python_version >= "3.12" and python_version < "4.0"
7
+ anyio==4.9.0 ; python_version >= "3.12" and python_version < "4.0"
8
+ attrs==25.3.0 ; python_version >= "3.12" and python_version < "4.0"
9
+ audioop-lts==0.2.1 ; python_version >= "3.13" and python_version < "4.0"
10
+ brotli==1.1.0 ; python_version >= "3.12" and python_version < "4.0"
11
+ certifi==2025.7.14 ; python_version >= "3.12" and python_version < "4.0"
12
+ charset-normalizer==3.4.2 ; python_version >= "3.12" and python_version < "4"
13
+ click==8.2.1 ; python_version >= "3.12" and python_version < "4.0"
14
+ colorama==0.4.6 ; python_version >= "3.12" and python_version < "4.0"
15
+ distro==1.9.0 ; python_version >= "3.12" and python_version < "4.0"
16
+ fastapi==0.116.1 ; python_version >= "3.12" and python_version < "4.0"
17
+ ffmpy==0.6.1 ; python_version >= "3.12" and python_version < "4.0"
18
+ filelock==3.18.0 ; python_version >= "3.12" and python_version < "4.0"
19
+ frozenlist==1.7.0 ; python_version >= "3.12" and python_version < "4.0"
20
+ fsspec==2025.7.0 ; python_version >= "3.12" and python_version < "4.0"
21
+ gradio-client==1.11.0 ; python_version >= "3.12" and python_version < "4.0"
22
+ gradio==5.38.2 ; python_version >= "3.12" and python_version < "4.0"
23
+ griffe==1.8.0 ; python_version >= "3.12" and python_version < "4.0"
24
+ groovy==0.1.2 ; python_version >= "3.12" and python_version < "4.0"
25
+ h11==0.16.0 ; python_version >= "3.12" and python_version < "4.0"
26
+ hf-xet==1.1.5 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "arm64" or platform_machine == "aarch64")
27
+ httpcore==1.0.9 ; python_version >= "3.12" and python_version < "4.0"
28
+ httpx-sse==0.4.1 ; python_version >= "3.12" and python_version < "4.0"
29
+ httpx==0.28.1 ; python_version >= "3.12" and python_version < "4.0"
30
+ huggingface-hub==0.34.1 ; python_version >= "3.12" and python_version < "4.0"
31
+ idna==3.10 ; python_version >= "3.12" and python_version < "4.0"
32
+ importlib-metadata==8.7.0 ; python_version >= "3.12" and python_version < "4.0"
33
+ jinja2==3.1.6 ; python_version >= "3.12" and python_version < "4.0"
34
+ jiter==0.10.0 ; python_version >= "3.12" and python_version < "4.0"
35
+ jsonschema-specifications==2025.4.1 ; python_version >= "3.12" and python_version < "4.0"
36
+ jsonschema==4.25.0 ; python_version >= "3.12" and python_version < "4.0"
37
+ litellm==1.74.8 ; python_version >= "3.12" and python_version < "4.0"
38
+ markdown-it-py==3.0.0 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
39
+ markupsafe==3.0.2 ; python_version >= "3.12" and python_version < "4.0"
40
+ mcp==1.12.2 ; python_version >= "3.12" and python_version < "4.0"
41
+ mdurl==0.1.2 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
42
+ multidict==6.6.3 ; python_version >= "3.12" and python_version < "4.0"
43
+ numpy==2.3.2 ; python_version >= "3.12" and python_version < "4.0"
44
+ openai-agents[litellm]==0.2.3 ; python_version >= "3.12" and python_version < "4.0"
45
+ openai==1.97.1 ; python_version >= "3.12" and python_version < "4.0"
46
+ orjson==3.11.1 ; python_version >= "3.12" and python_version < "4.0"
47
+ packaging==25.0 ; python_version >= "3.12" and python_version < "4.0"
48
+ pandas==2.3.1 ; python_version >= "3.12" and python_version < "4.0"
49
+ pillow==11.3.0 ; python_version >= "3.12" and python_version < "4.0"
50
+ propcache==0.3.2 ; python_version >= "3.12" and python_version < "4.0"
51
+ pydantic-core==2.33.2 ; python_version >= "3.12" and python_version < "4.0"
52
+ pydantic-settings==2.10.1 ; python_version >= "3.12" and python_version < "4.0"
53
+ pydantic==2.11.7 ; python_version >= "3.12" and python_version < "4.0"
54
+ pydub==0.25.1 ; python_version >= "3.12" and python_version < "4.0"
55
+ pygments==2.19.2 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
56
+ python-dateutil==2.9.0.post0 ; python_version >= "3.12" and python_version < "4.0"
57
+ python-dotenv==1.1.1 ; python_version >= "3.12" and python_version < "4.0"
58
+ python-multipart==0.0.20 ; python_version >= "3.12" and python_version < "4.0"
59
+ pytz==2025.2 ; python_version >= "3.12" and python_version < "4.0"
60
+ pywin32==311 ; python_version >= "3.12" and python_version < "4.0" and sys_platform == "win32"
61
+ pyyaml==6.0.2 ; python_version >= "3.12" and python_version < "4.0"
62
+ referencing==0.36.2 ; python_version >= "3.12" and python_version < "4.0"
63
+ regex==2024.11.6 ; python_version >= "3.12" and python_version < "4.0"
64
+ reportlab==4.4.3 ; python_version >= "3.12" and python_version < "4"
65
+ requests==2.32.4 ; python_version >= "3.12" and python_version < "4.0"
66
+ rich==14.1.0 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
67
+ rpds-py==0.26.0 ; python_version >= "3.12" and python_version < "4.0"
68
+ ruff==0.12.5 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
69
+ safehttpx==0.1.6 ; python_version >= "3.12" and python_version < "4.0"
70
+ semantic-version==2.10.0 ; python_version >= "3.12" and python_version < "4.0"
71
+ shellingham==1.5.4 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
72
+ six==1.17.0 ; python_version >= "3.12" and python_version < "4.0"
73
+ sniffio==1.3.1 ; python_version >= "3.12" and python_version < "4.0"
74
+ sse-starlette==3.0.2 ; python_version >= "3.12" and python_version < "4.0"
75
+ starlette==0.47.2 ; python_version >= "3.12" and python_version < "4.0"
76
+ tiktoken==0.9.0 ; python_version >= "3.12" and python_version < "4.0"
77
+ tokenizers==0.21.2 ; python_version >= "3.12" and python_version < "4.0"
78
+ tomlkit==0.13.3 ; python_version >= "3.12" and python_version < "4.0"
79
+ tqdm==4.67.1 ; python_version >= "3.12" and python_version < "4.0"
80
+ typer==0.16.0 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
81
+ types-requests==2.32.4.20250611 ; python_version >= "3.12" and python_version < "4.0"
82
+ typing-extensions==4.14.1 ; python_version >= "3.12" and python_version < "4.0"
83
+ typing-inspection==0.4.1 ; python_version >= "3.12" and python_version < "4.0"
84
+ tzdata==2025.2 ; python_version >= "3.12" and python_version < "4.0"
85
+ urllib3==2.5.0 ; python_version >= "3.12" and python_version < "4.0"
86
+ uvicorn==0.35.0 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
87
+ websockets==15.0.1 ; python_version >= "3.12" and python_version < "4.0"
88
+ yarl==1.20.1 ; python_version >= "3.12" and python_version < "4.0"
89
+ zipp==3.23.0 ; python_version >= "3.12" and python_version < "4.0"
apps/gradio-app/src/fitness_gradio/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Fitness Gradio App - Web interface for the Fitness AI Assistant.
3
+ """
4
+
5
+ __version__ = "0.1.0"
6
+
7
+ from .ui import create_fitness_app
8
+
9
+ __all__ = ['create_fitness_app']
apps/gradio-app/src/fitness_gradio/examples/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Examples module for the fitness app.
3
+ """
4
+ from .demo import run_examples, example_agent_conversation, example_model_listing
5
+
6
+ __all__ = [
7
+ 'run_examples',
8
+ 'example_agent_conversation',
9
+ 'example_model_listing'
10
+ ]
apps/gradio-app/src/fitness_gradio/examples/demo.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Example usage and demonstration of the fitness app.
3
+ """
4
+ import asyncio
5
+ from agents import Runner
6
+
7
+ from ..agents import FitnessAgent
8
+ from ..utils import setup_logging, get_logger
9
+
10
+ # Setup logging for examples
11
+ setup_logging()
12
+ logger = get_logger(__name__)
13
+
14
+
15
+ def example_model_listing():
16
+ """Example of listing available models."""
17
+ print("🤖 Available AI Models (Anthropic + OpenAI):")
18
+ print("=" * 60)
19
+
20
+ # Show models by provider
21
+ providers = FitnessAgent.get_models_by_provider()
22
+
23
+ print("🔵 ANTHROPIC MODELS:")
24
+ for name, full_id in providers["anthropic"].items():
25
+ print(f" • {name}: {full_id}")
26
+ print(f" {FitnessAgent.get_model_info(name)}")
27
+ print()
28
+
29
+ print("🟢 OPENAI MODELS:")
30
+ for name, full_id in providers["openai"].items():
31
+ print(f" • {name}: {full_id}")
32
+ print(f" {FitnessAgent.get_model_info(name)}")
33
+ print()
34
+
35
+ print("🎯 RECOMMENDED MODELS (most likely to work):")
36
+ recommended = FitnessAgent.get_recommended_models()
37
+ for model in recommended:
38
+ provider_icon = "🔵" if "claude" in model else "🟢" if any(x in model for x in ["gpt", "o1", "o3"]) else "⚪"
39
+ print(f" {provider_icon} {model}")
40
+
41
+
42
+ def example_agent_creation():
43
+ """Example of creating agents with different models."""
44
+ print("\n" + "="*60 + "\n")
45
+
46
+ # Create agent with default model
47
+ print("Creating agent with default model (gpt-4o-mini)...")
48
+ agent = FitnessAgent()
49
+ print(f"✅ Created agent:")
50
+ print(f" Model name: {agent.model_name}")
51
+ print(f" Provider: {agent.provider}")
52
+ print(f" Final model: {agent.final_model}")
53
+
54
+ print("\n" + "="*60 + "\n")
55
+
56
+ # Example with OpenAI model
57
+ print("Creating agent with OpenAI model (gpt-4o-mini)...")
58
+ try:
59
+ openai_agent = FitnessAgent("gpt-4o-mini")
60
+ print(f"✅ Created OpenAI agent:")
61
+ print(f" Model name: {openai_agent.model_name}")
62
+ print(f" Provider: {openai_agent.provider}")
63
+ print(f" Final model: {openai_agent.final_model}")
64
+ except Exception as e:
65
+ print(f"⚠️ Could not create OpenAI agent: {e}")
66
+ print(" (This is normal if you don't have OPENAI_API_KEY set)")
67
+
68
+
69
+ async def example_agent_conversation():
70
+ """Example of having a conversation with the agent."""
71
+ print("\n" + "="*60)
72
+ print("🗣️ EXAMPLE CONVERSATION")
73
+ print("="*60 + "\n")
74
+
75
+ try:
76
+ # Create agent
77
+ agent = FitnessAgent()
78
+ print(f"Using model: {agent.model_name}")
79
+ print()
80
+
81
+ # Example conversation
82
+ example_messages = [
83
+ "Hello! I'm new to fitness and want to start working out.",
84
+ "I want to build muscle but I only have 30 minutes a day, 3 times a week.",
85
+ "Can you create a specific workout plan for me?"
86
+ ]
87
+
88
+ for i, message in enumerate(example_messages, 1):
89
+ print(f"👤 User (Message {i}): {message}")
90
+
91
+ try:
92
+ # Run the agent
93
+ result = Runner.run_sync(agent, message)
94
+ response = result.final_output
95
+
96
+ print(f"🤖 Assistant: {response}")
97
+ print("\n" + "-"*40 + "\n")
98
+
99
+ except Exception as e:
100
+ print(f"❌ Error: {str(e)}")
101
+ print(" (This is expected if you don't have API keys configured)")
102
+ break
103
+
104
+ except Exception as e:
105
+ print(f"❌ Could not create agent: {e}")
106
+ print(" Make sure you have OPENAI_API_KEY or ANTHROPIC_API_KEY configured")
107
+
108
+
109
+ def run_examples():
110
+ """Run all examples."""
111
+ print("🏋️‍♀️ FITNESS APP EXAMPLES")
112
+ print("="*60)
113
+
114
+ # Model listing example
115
+ example_model_listing()
116
+
117
+ # Agent creation example
118
+ example_agent_creation()
119
+
120
+ print("\n💡 To actually run the agents:")
121
+ print(" - Set ANTHROPIC_API_KEY for Claude models")
122
+ print(" - Set OPENAI_API_KEY for GPT models")
123
+ print(" - Use Runner.run_sync(agent, 'your message') to chat")
124
+
125
+ # Conversation example (commented out by default since it requires API keys)
126
+ print("\n🔄 To see a conversation example, uncomment the following:")
127
+ print(" # asyncio.run(example_agent_conversation())")
128
+
129
+ # Uncomment this line to run the conversation example:
130
+ # asyncio.run(example_agent_conversation())
131
+
132
+
133
+ if __name__ == "__main__":
134
+ run_examples()
apps/gradio-app/src/fitness_gradio/main.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Main entry point for the Gradio fitness app.
3
+ """
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ # Add the shared library to the Python path if needed
8
+ shared_path = Path(__file__).parent.parent.parent.parent / "shared" / "src"
9
+ if str(shared_path) not in sys.path:
10
+ sys.path.insert(0, str(shared_path))
11
+
12
+ from fitness_core import setup_logging, Config, get_logger
13
+ from .ui import create_fitness_app
14
+
15
+ # Configure logging
16
+ setup_logging(level=Config.LOG_LEVEL, log_file=Config.LOG_FILE)
17
+ logger = get_logger(__name__)
18
+
19
+
20
+ def main():
21
+ """Main entry point for the Gradio application."""
22
+ try:
23
+ # Validate configuration
24
+ config_status = Config.validate_config()
25
+
26
+ if not config_status["valid"]:
27
+ logger.error("Configuration validation failed:")
28
+ for error in config_status["errors"]:
29
+ logger.error(f" - {error}")
30
+ sys.exit(1)
31
+
32
+ # Show warnings
33
+ for warning in config_status["warnings"]:
34
+ logger.warning(warning)
35
+
36
+ # Create and launch the Gradio app
37
+ logger.info("🎨 Starting Fitness Gradio App...")
38
+
39
+ app = create_fitness_app()
40
+ gradio_config = Config.get_gradio_config()
41
+
42
+ logger.info(f"🚀 Launching on http://{gradio_config['server_name']}:{gradio_config['server_port']}")
43
+
44
+ app.launch(**gradio_config)
45
+
46
+ except Exception as e:
47
+ logger.error(f"Failed to start Gradio app: {str(e)}")
48
+ sys.exit(1)
49
+
50
+
51
+ if __name__ == "__main__":
52
+ main()
apps/gradio-app/src/fitness_gradio/ui/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ UI components and handlers for the Gradio fitness app.
3
+ """
4
+ from .app import FitnessAppUI, create_fitness_app
5
+ from .components import UIComponents
6
+ from .handlers import UIHandlers
7
+
8
+ __all__ = [
9
+ 'FitnessAppUI',
10
+ 'create_fitness_app',
11
+ 'UIComponents',
12
+ 'UIHandlers'
13
+ ]
apps/gradio-app/src/fitness_gradio/ui/app.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Main Gradio UI application for the fitness app.
3
+ """
4
+ import gradio as gr
5
+ from typing import Dict, Any
6
+
7
+ from .components import UIComponents
8
+ from .handlers import UIHandlers
9
+ from .styles import MAIN_CSS
10
+ from fitness_core.utils import Config, get_logger
11
+
12
+ logger = get_logger(__name__)
13
+
14
+
15
+ class FitnessAppUI:
16
+ """Main UI application class."""
17
+
18
+ def __init__(self):
19
+ """Initialize the UI application."""
20
+ self.demo = None
21
+ self._setup_interface()
22
+
23
+ def _setup_interface(self) -> None:
24
+ """Set up the Gradio interface."""
25
+ with gr.Blocks(
26
+ theme=gr.themes.Soft(),
27
+ title="Fitness AI Assistant",
28
+ css=MAIN_CSS
29
+ ) as self.demo:
30
+
31
+ # Header
32
+ UIComponents.create_header()
33
+
34
+ # Model selection section
35
+ with gr.Row():
36
+ (model_table, model_filter,
37
+ selected_model, model_info_display) = UIComponents.create_model_selection_section()
38
+
39
+ # Main chat interface
40
+ chatbot = UIComponents.create_chatbot()
41
+ chat_input = UIComponents.create_chat_input()
42
+
43
+ # Control buttons
44
+ clear_btn, streaming_toggle = UIComponents.create_control_buttons()
45
+
46
+ # Examples section
47
+ UIComponents.create_examples_section(chat_input)
48
+
49
+ # Help sections
50
+ UIComponents.create_help_section()
51
+ UIComponents.create_model_comparison_section()
52
+
53
+ # Event handlers
54
+ self._setup_event_handlers(
55
+ chatbot, chat_input, clear_btn, streaming_toggle,
56
+ model_table, model_filter, selected_model, model_info_display
57
+ )
58
+
59
+ def _setup_event_handlers(
60
+ self,
61
+ chatbot: gr.Chatbot,
62
+ chat_input: gr.MultimodalTextbox,
63
+ clear_btn: gr.Button,
64
+ streaming_toggle: gr.Checkbox,
65
+ model_table: gr.DataFrame,
66
+ model_filter: gr.Dropdown,
67
+ selected_model: gr.Textbox,
68
+ model_info_display: gr.Markdown
69
+ ) -> None:
70
+ """Set up all event handlers."""
71
+
72
+ # Chat message handling
73
+ chat_msg = chat_input.submit(
74
+ UIHandlers.add_message,
75
+ [chatbot, chat_input],
76
+ [chatbot, chat_input]
77
+ )
78
+ bot_msg = chat_msg.then(
79
+ UIHandlers.dynamic_bot,
80
+ [chatbot, streaming_toggle, selected_model],
81
+ chatbot,
82
+ api_name="bot_response"
83
+ )
84
+ bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
85
+
86
+ # Model table filtering
87
+ model_filter.change(
88
+ UIHandlers.filter_model_table,
89
+ inputs=[model_filter],
90
+ outputs=[model_table]
91
+ )
92
+
93
+ # Model selection from table
94
+ model_table.select(
95
+ UIHandlers.select_model_from_table,
96
+ inputs=[model_table],
97
+ outputs=[selected_model, model_info_display]
98
+ )
99
+
100
+ # Clear conversation handler
101
+ clear_btn.click(UIHandlers.clear_conversation, None, chatbot)
102
+
103
+ # Like/dislike feedback
104
+ chatbot.like(UIHandlers.print_like_dislike, None, None, like_user_message=True)
105
+
106
+ def launch(self, **kwargs) -> None:
107
+ """Launch the Gradio app."""
108
+ # Get default config and merge with provided kwargs
109
+ config = Config.get_gradio_config()
110
+ config.update(kwargs)
111
+
112
+ logger.info(f"Launching fitness app UI on {config['server_name']}:{config['server_port']}")
113
+ self.demo.launch(**config)
114
+
115
+ def get_demo(self) -> gr.Blocks:
116
+ """Get the Gradio demo object."""
117
+ return self.demo
118
+
119
+
120
+ def create_fitness_app() -> gr.Blocks:
121
+ """Create and return a new fitness app UI instance."""
122
+ app = FitnessAppUI()
123
+ return app.get_demo()
apps/gradio-app/src/fitness_gradio/ui/components.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ UI components for the fitness app.
3
+ """
4
+ import gradio as gr
5
+ from typing import List
6
+
7
+ from fitness_core.agents import FitnessAgent
8
+ from .styles import (
9
+ HEADER_MARKDOWN,
10
+ HELP_CONTENT,
11
+ MODEL_COMPARISON_CONTENT,
12
+ EXAMPLE_PROMPTS
13
+ )
14
+
15
+
16
+ class UIComponents:
17
+ """Factory class for creating UI components."""
18
+
19
+ @staticmethod
20
+ def create_header() -> gr.Markdown:
21
+ """Create the app header."""
22
+ return gr.Markdown(HEADER_MARKDOWN)
23
+
24
+ @staticmethod
25
+ def create_model_selection_section() -> tuple:
26
+ """
27
+ Create the model selection section with table and controls.
28
+
29
+ Returns:
30
+ Tuple of (model_table, model_filter, selected_model, model_info_display)
31
+ """
32
+ with gr.Column():
33
+ gr.Markdown("### 🤖 AI Model Selection")
34
+ gr.Markdown("Browse and select your preferred AI model. Click on a row to select it.")
35
+
36
+ # Create model table data
37
+ table_data = FitnessAgent.get_models_table_data()
38
+
39
+ model_table = gr.DataFrame(
40
+ value=table_data,
41
+ headers=["⭐", "Provider", "Model Name", "Capability", "Speed", "Cost", "Description"],
42
+ datatype=["str", "str", "str", "str", "str", "str", "str"],
43
+ interactive=False,
44
+ wrap=True,
45
+ elem_classes=["model-table"]
46
+ )
47
+
48
+ # Hidden component to manage selection
49
+ selected_model = gr.Textbox(
50
+ value="gpt-4o-mini",
51
+ visible=False,
52
+ label="Selected Model"
53
+ )
54
+
55
+ # Model filter dropdown
56
+ with gr.Row():
57
+ model_filter = gr.Dropdown(
58
+ choices=["All Models", "🔵 Anthropic Only", "🟢 OpenAI Only", "⭐ Recommended Only"],
59
+ value="All Models",
60
+ label="Filter Models",
61
+ scale=3
62
+ )
63
+
64
+ # Model information display
65
+ model_info_display = gr.Markdown(
66
+ value=f"""🤖 **Current Model:** `gpt-4o-mini`
67
+
68
+ 💡 **Description:** {FitnessAgent.get_model_info('gpt-4o-mini')}
69
+
70
+ 📊 **Status:** Ready to chat!""",
71
+ visible=True,
72
+ elem_classes=["model-info"]
73
+ )
74
+
75
+ return model_table, model_filter, selected_model, model_info_display
76
+
77
+ @staticmethod
78
+ def create_chatbot() -> gr.Chatbot:
79
+ """Create the main chatbot component."""
80
+ return gr.Chatbot(
81
+ elem_id="chatbot",
82
+ type="messages",
83
+ show_copy_button=True,
84
+ show_share_button=False,
85
+ avatar_images=None,
86
+ sanitize_html=True,
87
+ render_markdown=True
88
+ )
89
+
90
+ @staticmethod
91
+ def create_chat_input() -> gr.MultimodalTextbox:
92
+ """Create the chat input component."""
93
+ return gr.MultimodalTextbox(
94
+ interactive=True,
95
+ file_count="multiple",
96
+ placeholder="Ask me about fitness, request a workout plan, or get meal planning advice...",
97
+ show_label=False,
98
+ sources=["microphone", "upload"],
99
+ )
100
+
101
+ @staticmethod
102
+ def create_control_buttons() -> tuple:
103
+ """
104
+ Create the control buttons (clear, streaming toggle).
105
+
106
+ Returns:
107
+ Tuple of (clear_btn, streaming_toggle)
108
+ """
109
+ with gr.Row():
110
+ clear_btn = gr.Button("🗑️ Clear Conversation", variant="secondary", size="sm")
111
+ streaming_toggle = gr.Checkbox(
112
+ label="🚀 Enable Real-time Streaming",
113
+ value=True,
114
+ info="Stream responses in real-time as the agent generates them"
115
+ )
116
+
117
+ return clear_btn, streaming_toggle
118
+
119
+ @staticmethod
120
+ def create_examples_section(chat_input: gr.MultimodalTextbox) -> gr.Examples:
121
+ """Create the examples section."""
122
+ with gr.Row():
123
+ return gr.Examples(
124
+ examples=EXAMPLE_PROMPTS,
125
+ inputs=chat_input,
126
+ label="💡 Try asking:"
127
+ )
128
+
129
+ @staticmethod
130
+ def create_help_section() -> gr.Accordion:
131
+ """Create the help accordion section."""
132
+ with gr.Accordion("ℹ️ How to use this assistant", open=False):
133
+ gr.Markdown(HELP_CONTENT)
134
+
135
+ @staticmethod
136
+ def create_model_comparison_section() -> gr.Accordion:
137
+ """Create the model comparison accordion section."""
138
+ with gr.Accordion("🤖 Model Comparison Guide", open=False):
139
+ gr.Markdown(MODEL_COMPARISON_CONTENT)
apps/gradio-app/src/fitness_gradio/ui/handlers.py ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Event handlers for the fitness app UI.
3
+ """
4
+ import gradio as gr
5
+ import logging
6
+ from typing import List, Dict, Union, Generator, Any, Tuple
7
+
8
+ from fitness_core.agents import FitnessAgent
9
+ from fitness_core.services import ConversationManager, AgentRunner, ResponseFormatter
10
+ from fitness_core.utils import get_logger
11
+
12
+ logger = get_logger(__name__)
13
+
14
+ # Global state management
15
+ conversation_manager = ConversationManager()
16
+ current_agent = None
17
+ current_model = "gpt-4o-mini"
18
+
19
+
20
+ class UIHandlers:
21
+ """Collection of event handlers for the UI."""
22
+
23
+ @staticmethod
24
+ def get_or_create_agent(model_name: str = None) -> FitnessAgent:
25
+ """
26
+ Get the current agent or create a new one with the specified model
27
+
28
+ Args:
29
+ model_name: Name of the AI model to use
30
+
31
+ Returns:
32
+ FitnessAgent instance
33
+ """
34
+ global current_agent, current_model
35
+
36
+ # Use default if no model specified
37
+ if model_name is None:
38
+ model_name = current_model
39
+
40
+ # Create new agent if model changed or no agent exists
41
+ if current_agent is None or current_model != model_name:
42
+ logger.info(f"Creating new agent with model: {model_name}")
43
+ current_agent = FitnessAgent(model_name)
44
+ current_model = model_name
45
+
46
+ return current_agent
47
+
48
+ @staticmethod
49
+ def change_model(new_model: str) -> str:
50
+ """
51
+ Change the current model and reset the agent
52
+
53
+ Args:
54
+ new_model: New model to use
55
+
56
+ Returns:
57
+ Status message
58
+ """
59
+ global current_agent, current_model
60
+
61
+ try:
62
+ # Validate model exists in our supported list
63
+ is_valid, validation_message = FitnessAgent.validate_model_name(new_model)
64
+
65
+ if not is_valid:
66
+ return f"""❌ **Invalid Model Selection**
67
+
68
+ {validation_message}
69
+
70
+ Please select a model from the supported list above."""
71
+
72
+ # Test if we can create an agent with this model (basic validation)
73
+ try:
74
+ test_agent = FitnessAgent(new_model)
75
+ logger.info(f"Successfully validated model: {new_model}")
76
+ except Exception as model_error:
77
+ logger.error(f"Failed to create agent with model {new_model}: {model_error}")
78
+ return f"""❌ **Model Creation Failed**
79
+
80
+ Could not create agent with model `{new_model}`.
81
+
82
+ **Error:** {str(model_error)}
83
+
84
+ Please check your API keys and try a different model."""
85
+
86
+ # Reset agent to force recreation with new model
87
+ current_agent = None
88
+ current_model = new_model
89
+
90
+ # Get model info for user feedback
91
+ model_info = FitnessAgent.get_model_info(new_model)
92
+
93
+ logger.info(f"Model changed to: {new_model}")
94
+ return f"""✅ **Model Successfully Changed!**
95
+
96
+ 🤖 **Current Model:** `{new_model}`
97
+
98
+ 💡 **Description:** {model_info}
99
+
100
+ 🔄 **Status:** Ready to chat with the new model. Your conversation history is preserved."""
101
+
102
+ except Exception as e:
103
+ logger.error(f"Error changing model: {str(e)}")
104
+ return f"❌ **Unexpected Error:** {str(e)}"
105
+
106
+ @staticmethod
107
+ def filter_model_table(filter_choice: str) -> List[List[str]]:
108
+ """Filter the model table based on user selection."""
109
+ all_data = FitnessAgent.get_models_table_data()
110
+
111
+ if filter_choice == "🔵 Anthropic Only":
112
+ return [row for row in all_data if "🔵 Anthropic" in row[1]]
113
+ elif filter_choice == "🟢 OpenAI Only":
114
+ return [row for row in all_data if "🟢 OpenAI" in row[1]]
115
+ elif filter_choice == "⭐ Recommended Only":
116
+ return [row for row in all_data if row[0] == "⭐"]
117
+ else: # All Models
118
+ return all_data
119
+
120
+ @staticmethod
121
+ def select_model_from_table(table_data: Any, evt: gr.SelectData) -> Tuple[str, str]:
122
+ """Select a model from the table"""
123
+ try:
124
+ if evt is None:
125
+ return "", "Please select a model from the table"
126
+
127
+ # Get the selected row index
128
+ row_index = evt.index[0] if evt.index else 0
129
+
130
+ # Handle both DataFrame and list formats
131
+ try:
132
+ # Try pandas DataFrame access first
133
+ if hasattr(table_data, 'iloc') and row_index < len(table_data):
134
+ row = table_data.iloc[row_index]
135
+ if len(row) >= 7:
136
+ rating = row.iloc[0] # Recommendation star
137
+ provider = row.iloc[1] # Provider
138
+ selected_model = row.iloc[2] # Model name
139
+ capability = row.iloc[3] # Capability rating
140
+ speed = row.iloc[4] # Speed rating
141
+ cost = row.iloc[5] # Cost rating
142
+ description = row.iloc[6] # Description
143
+ else:
144
+ return "", "Invalid table row - insufficient columns"
145
+ # Fall back to list access
146
+ elif isinstance(table_data, list) and row_index < len(table_data) and len(table_data[row_index]) >= 7:
147
+ rating = table_data[row_index][0] # Recommendation star
148
+ provider = table_data[row_index][1] # Provider
149
+ selected_model = table_data[row_index][2] # Model name
150
+ capability = table_data[row_index][3] # Capability rating
151
+ speed = table_data[row_index][4] # Speed rating
152
+ cost = table_data[row_index][5] # Cost rating
153
+ description = table_data[row_index][6] # Description
154
+ else:
155
+ return "", "Invalid selection - please try clicking on a model row"
156
+
157
+ except (IndexError, KeyError) as data_error:
158
+ logger.error(f"Data access error: {str(data_error)} - Table type: {type(table_data)}, Row index: {row_index}")
159
+ return "", "Error accessing table data - please try again"
160
+
161
+ # Update the model and get the change result
162
+ change_result = UIHandlers.change_model(selected_model)
163
+
164
+ if "✅" in change_result:
165
+ model_info = f"""✅ **Model Successfully Selected!**
166
+
167
+ 🤖 **Current Model:** `{selected_model}`
168
+ {provider}
169
+ **Capability:** {capability} | **Speed:** {speed} | **Cost:** {cost}
170
+
171
+ 💡 **Description:** {description}
172
+
173
+ 📊 **Status:** Ready to chat with the new model!"""
174
+ else:
175
+ model_info = change_result # Show the error message
176
+
177
+ return selected_model, model_info
178
+
179
+ except Exception as e:
180
+ logger.error(f"Error in select_model_from_table: {str(e)} - Table type: {type(table_data)}, Row index: {row_index if 'row_index' in locals() else 'unknown'}")
181
+ return "", f"Error selecting model: {str(e)}"
182
+
183
+ @staticmethod
184
+ def print_like_dislike(x: gr.LikeData) -> None:
185
+ """Log user feedback on messages"""
186
+ logger.info(f"User feedback - Index: {x.index}, Value: {x.value}, Liked: {x.liked}")
187
+
188
+ @staticmethod
189
+ def add_message(history: List[Dict], message: Dict) -> Tuple[List[Dict], gr.MultimodalTextbox]:
190
+ """
191
+ Add user message to chat history with proper validation
192
+
193
+ Args:
194
+ history: Current Gradio chat history (for display)
195
+ message: User message containing text and/or files
196
+
197
+ Returns:
198
+ Tuple of (updated_history, cleared_input)
199
+ """
200
+ try:
201
+ user_content_parts = []
202
+
203
+ # Handle file uploads
204
+ if message.get("files"):
205
+ for file_path in message["files"]:
206
+ if file_path: # Validate file path exists
207
+ file_content = f"[File uploaded: {file_path}]"
208
+ user_content_parts.append(file_content)
209
+ # Add to Gradio history for display
210
+ history.append({
211
+ "role": "user",
212
+ "content": {"path": file_path}
213
+ })
214
+
215
+ # Handle text input
216
+ if message.get("text") and message["text"].strip():
217
+ text_content = message["text"].strip()
218
+ user_content_parts.append(text_content)
219
+ # Add to Gradio history for display
220
+ history.append({
221
+ "role": "user",
222
+ "content": text_content
223
+ })
224
+
225
+ # Add to conversation manager (combine file and text content)
226
+ if user_content_parts:
227
+ combined_content = "\n".join(user_content_parts)
228
+ conversation_manager.add_user_message(combined_content)
229
+ logger.info(f"Added user message to conversation. {conversation_manager.get_history_summary()}")
230
+
231
+ return history, gr.MultimodalTextbox(value=None, interactive=False)
232
+
233
+ except Exception as e:
234
+ logger.error(f"Error adding message: {str(e)}")
235
+ # Add error message to history
236
+ history.append({
237
+ "role": "assistant",
238
+ "content": "Sorry, there was an error processing your message. Please try again."
239
+ })
240
+ return history, gr.MultimodalTextbox(value=None, interactive=False)
241
+
242
+ @staticmethod
243
+ def bot_with_real_streaming(
244
+ history: List[Dict],
245
+ model_name: str = None
246
+ ) -> Generator[List[Dict], None, None]:
247
+ """
248
+ Bot function with real-time streaming from the agent
249
+
250
+ Args:
251
+ history: Current Gradio chat history (for display only)
252
+ model_name: Model to use for the agent
253
+
254
+ Yields:
255
+ Updated history with real-time streaming response
256
+ """
257
+ try:
258
+ # Get agent instance with specified model
259
+ agent = UIHandlers.get_or_create_agent(model_name)
260
+
261
+ # Get input for agent from conversation manager
262
+ agent_input = conversation_manager.get_input_for_agent()
263
+ logger.info(f"Sending to agent ({current_model}): {type(agent_input)} - {conversation_manager.get_history_summary()}")
264
+
265
+ # Add empty assistant message for streaming
266
+ history.append({"role": "assistant", "content": ""})
267
+
268
+ # Use the AgentRunner for streaming execution
269
+ logger.info(f"Using real-time streaming mode")
270
+
271
+ # Direct execution without ThreadPoolExecutor to avoid event loop issues
272
+ try:
273
+ content_chunks = []
274
+ final_result = None
275
+
276
+ for chunk in AgentRunner.run_agent_with_streaming_sync(agent, agent_input):
277
+ if chunk['type'] == 'final_result':
278
+ final_result = chunk['result']
279
+ if chunk['content']:
280
+ content_chunks.append(chunk['content'])
281
+ elif chunk['type'] == 'error':
282
+ final_result = chunk['result']
283
+ content_chunks.append(chunk['content'])
284
+
285
+ # Update conversation manager
286
+ if final_result:
287
+ conversation_manager.update_from_result(final_result)
288
+ logger.info(f"Updated conversation manager. {conversation_manager.get_history_summary()}")
289
+
290
+ # Stream the content updates to the UI
291
+ if content_chunks:
292
+ for content in content_chunks:
293
+ history[-1]["content"] = content
294
+ yield history
295
+ else:
296
+ history[-1]["content"] = "I apologize, but I didn't receive a response. Please try again."
297
+ yield history
298
+
299
+ except Exception as e:
300
+ logger.error(f"Error in streaming execution: {str(e)}")
301
+ history[-1]["content"] = f"Sorry, I encountered an error while processing your request: {str(e)}"
302
+ yield history
303
+
304
+ except Exception as e:
305
+ logger.error(f"Bot streaming function error: {str(e)}")
306
+ if len(history) == 0 or history[-1].get("role") != "assistant":
307
+ history.append({"role": "assistant", "content": ""})
308
+ history[-1]["content"] = "I apologize, but I'm experiencing technical difficulties. Please try again in a moment."
309
+ yield history
310
+
311
+ @staticmethod
312
+ def bot(history: List[Dict], model_name: str = None) -> Generator[List[Dict], None, None]:
313
+ """
314
+ Main bot function with simulated streaming
315
+
316
+ Args:
317
+ history: Current Gradio chat history (for display only)
318
+ model_name: Model to use for the agent
319
+
320
+ Yields:
321
+ Updated history with bot response
322
+ """
323
+ try:
324
+ # Get agent instance with specified model
325
+ agent = UIHandlers.get_or_create_agent(model_name)
326
+
327
+ # Get input for agent from conversation manager
328
+ agent_input = conversation_manager.get_input_for_agent()
329
+ logger.info(f"Sending to agent ({current_model}): {type(agent_input)} - {conversation_manager.get_history_summary()}")
330
+
331
+ # Run agent safely with sync wrapper
332
+ result = AgentRunner.run_agent_safely_sync(agent, agent_input)
333
+
334
+ # Update conversation manager with the result
335
+ conversation_manager.update_from_result(result)
336
+ logger.info(f"Updated conversation manager. {conversation_manager.get_history_summary()}")
337
+
338
+ # Extract and format response for display
339
+ response = ResponseFormatter.extract_response_content(result)
340
+
341
+ # Stream the response with simulated typing
342
+ yield from ResponseFormatter.stream_response(response, history)
343
+
344
+ except Exception as e:
345
+ logger.error(f"Bot function error: {str(e)}")
346
+ error_response = "I apologize, but I'm experiencing technical difficulties. Please try again in a moment."
347
+ yield from ResponseFormatter.stream_response(error_response, history)
348
+
349
+ @staticmethod
350
+ def dynamic_bot(
351
+ history: List[Dict],
352
+ use_real_streaming: bool = True,
353
+ model_name: str = None
354
+ ) -> Generator[List[Dict], None, None]:
355
+ """
356
+ Dynamic bot function that can switch between streaming modes
357
+
358
+ Args:
359
+ history: Current Gradio chat history (for display only)
360
+ use_real_streaming: Whether to use real-time streaming from agent
361
+ model_name: Model to use for the agent
362
+
363
+ Yields:
364
+ Updated history with bot response
365
+ """
366
+ if use_real_streaming:
367
+ logger.info("Using real-time streaming mode")
368
+ yield from UIHandlers.bot_with_real_streaming(history, model_name)
369
+ else:
370
+ logger.info("Using simulated streaming mode")
371
+ yield from UIHandlers.bot(history, model_name)
372
+
373
+ @staticmethod
374
+ def clear_conversation() -> List[Dict]:
375
+ """
376
+ Clear the conversation history
377
+
378
+ Returns:
379
+ Empty chat history
380
+ """
381
+ global conversation_manager
382
+ conversation_manager.clear_history()
383
+ logger.info("Conversation history cleared")
384
+ return []
apps/gradio-app/src/fitness_gradio/ui/styles.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ CSS styles and theming for the fitness app UI.
3
+ """
4
+
5
+ # Main CSS for the Gradio interface
6
+ MAIN_CSS = """
7
+ .gradio-container {
8
+ max-width: 1200px !important;
9
+ }
10
+
11
+ #chatbot {
12
+ height: 600px;
13
+ }
14
+
15
+ .model-info {
16
+ background: linear-gradient(135deg, rgba(55, 65, 81, 0.9), rgba(75, 85, 99, 0.7)) !important;
17
+ color: #e5e7eb !important;
18
+ padding: 16px !important;
19
+ border-radius: 12px !important;
20
+ border-left: 4px solid #10b981 !important;
21
+ margin: 12px 0 !important;
22
+ border: 1px solid rgba(75, 85, 99, 0.4) !important;
23
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
24
+ backdrop-filter: blur(10px) !important;
25
+ }
26
+
27
+ .model-info p {
28
+ color: #e5e7eb !important;
29
+ margin: 8px 0 !important;
30
+ line-height: 1.5 !important;
31
+ }
32
+
33
+ .model-info strong {
34
+ color: #f9fafb !important;
35
+ font-weight: 600 !important;
36
+ }
37
+
38
+ .model-info em {
39
+ color: #d1d5db !important;
40
+ font-style: italic;
41
+ }
42
+
43
+ .model-info code {
44
+ background-color: rgba(31, 41, 55, 0.8) !important;
45
+ color: #10b981 !important;
46
+ padding: 2px 6px !important;
47
+ border-radius: 4px !important;
48
+ font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
49
+ font-size: 0.9em !important;
50
+ }
51
+
52
+ .model-dropdown {
53
+ font-weight: bold;
54
+ }
55
+
56
+ /* Ensure all text in model-info respects dark theme */
57
+ .model-info * {
58
+ color: inherit !important;
59
+ }
60
+
61
+ /* Fix for any remaining white background issues */
62
+ .model-info .prose {
63
+ color: #e5e7eb !important;
64
+ }
65
+ """
66
+
67
+ # Header markdown content
68
+ HEADER_MARKDOWN = """
69
+ # 🏋️‍♀️ Fitness AI Assistant
70
+ Your personal fitness companion for workout plans, meal planning, and fitness guidance!
71
+
72
+ 💡 **Tips:**
73
+ - Be specific about your fitness goals
74
+ - Mention any physical limitations or preferences
75
+ - Ask for modifications if needed
76
+ - Choose your preferred AI model for different capabilities
77
+ """
78
+
79
+ # Help content for the accordion
80
+ HELP_CONTENT = """
81
+ **What I can help you with:**
82
+ - Create personalized workout plans
83
+ - Design meal plans for your goals
84
+ - Provide fitness guidance and tips
85
+ - Suggest exercises for specific needs
86
+ - Help modify existing plans
87
+
88
+ **To get the best results:**
89
+ - Tell me your fitness level (beginner, intermediate, advanced)
90
+ - Mention your goals (weight loss, muscle gain, general fitness)
91
+ - Include any equipment you have access to
92
+ - Let me know about any injuries or limitations
93
+
94
+ **AI Model Selection:**
95
+ - **🔵 Anthropic Claude Models**: Excellent for detailed reasoning and analysis
96
+ - Claude-4: Most capable (premium), Claude-3.7: Extended thinking
97
+ - Claude-3.5: Balanced performance, Claude-3: Fast and cost-effective
98
+ - **🟢 OpenAI GPT Models**: Great for general tasks and familiar interface
99
+ - GPT-4o: Latest with vision, GPT-4 Turbo: Large context window
100
+ - GPT-3.5: Fast and economical, o1/o3: Advanced reasoning
101
+ - You can change models anytime - the conversation continues seamlessly
102
+ - Mix and match providers based on your preferences
103
+
104
+ **Conversation Management:**
105
+ - The assistant remembers our entire conversation
106
+ - You can refer back to previous plans or discussions
107
+ - Use the "Clear Conversation" button to start fresh
108
+ - Each conversation maintains context across multiple exchanges
109
+
110
+ **Streaming Options:**
111
+ - **Real-time Streaming**: Responses appear as the AI generates them using `Runner.run_streamed()` (most engaging)
112
+ - **Simulated Streaming**: Responses are generated fully, then displayed with typing effect (more reliable)
113
+ - Toggle the streaming mode using the checkbox above
114
+ - Real-time streaming shows tool calls, outputs, and message generation in real-time
115
+ - **Note**: Anthropic models automatically fall back to non-streaming if validation errors occur
116
+ """
117
+
118
+ # Model comparison guide content
119
+ MODEL_COMPARISON_CONTENT = """
120
+ ## 🔵 Anthropic Claude Models
121
+
122
+ | Model | Capability | Speed | Cost | Best For |
123
+ |-------|------------|--------|------|----------|
124
+ | claude-4-opus | ★★★★★ | ★★★☆☆ | ★★★★★ | Complex analysis, detailed plans |
125
+ | claude-4-sonnet | ★★★★☆ | ★★★★☆ | ★★★★☆ | Balanced high performance |
126
+ | claude-3.7-sonnet | ★★★★☆ | ★★★★☆ | ★★★☆☆ | Extended thinking, complex tasks |
127
+ | claude-3.5-sonnet | ★★★★☆ | ★★★★☆ | ★★★☆☆ | General use, balanced |
128
+ | claude-3.5-haiku | ★★★☆☆ | ★★★★★ | ★★☆☆☆ | Fast responses |
129
+ | claude-3-haiku | ★★★☆☆ | ★★★★★ | ★☆☆☆☆ | Most cost-effective |
130
+
131
+ ## 🟢 OpenAI GPT Models
132
+
133
+ | Model | Capability | Speed | Cost | Best For |
134
+ |-------|------------|--------|------|----------|
135
+ | gpt-4o | ★★★★★ | ★★★★☆ | ★★★★☆ | Latest features, vision support |
136
+ | gpt-4o-mini | ★★★★☆ | ★★★★★ | ★★☆☆☆ | **DEFAULT** - Balanced performance, affordable |
137
+ | gpt-4-turbo | ★★★★☆ | ★★★★☆ | ★★★★☆ | Large context, reliable |
138
+ | gpt-3.5-turbo | ★★★☆☆ | ★★★★★ | ★☆☆☆☆ | Fast and economical |
139
+ | o1-preview | ★★★★★ | ★★☆☆☆ | ★★★★★ | Advanced reasoning |
140
+ | o1-mini | ★★★★☆ | ★★★☆☆ | ★★★☆☆ | Reasoning tasks |
141
+ | o3-mini | ★★★★☆ | ★★★★☆ | ★★★☆☆ | Latest reasoning model |
142
+
143
+ ### 💡 Provider Comparison
144
+ - **🔵 Anthropic**: Excellent for detailed analysis, safety-focused, great for complex fitness planning
145
+ - **🟢 OpenAI**: Familiar interface, good general performance, strong tool usage
146
+
147
+ ### 🎯 Recommendations by Use Case
148
+ - **Quick questions**: claude-3.5-haiku, gpt-4o-mini, gpt-3.5-turbo
149
+ - **Comprehensive plans**: claude-3.5-sonnet, gpt-4o, claude-3.7-sonnet
150
+ - **Complex analysis**: claude-4-opus, gpt-4o, o1-preview
151
+ - **Budget-conscious**: claude-3-haiku, gpt-3.5-turbo, gpt-4o-mini
152
+ """
153
+
154
+ # Example prompts for the Examples component
155
+ EXAMPLE_PROMPTS = [
156
+ "Create a beginner workout plan for me",
157
+ "I want to lose weight - help me with a fitness plan",
158
+ "Design a muscle building program for intermediate level",
159
+ "I need a meal plan for gaining muscle mass",
160
+ "What exercises should I do for better cardiovascular health?",
161
+ "Help me with a home workout routine with no equipment"
162
+ ]
fitness_agent/__init__.py CHANGED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ """
2
+ Fitness Agent package - for Hugging Face Spaces deployment
3
+ """
4
+
5
+ from .fitness_agent import FitnessAgent
6
+
7
+ __all__ = ["FitnessAgent"]
fitness_agent/app.py CHANGED
@@ -1,1269 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
- import time
3
- import asyncio
4
- import logging
5
- import re
6
- from typing import List, Dict, Any, Optional, Union, Generator
7
- from concurrent.futures import ThreadPoolExecutor
8
- from fitness_agent import FitnessAgent
9
- from agents import Agent, ItemHelpers, Runner, function_tool
10
 
11
- # Configure logging
12
- logging.basicConfig(level=logging.INFO)
13
- logger = logging.getLogger(__name__)
14
-
15
- # Check what streaming methods are available
16
- logger.info(f"Available Runner methods: {[method for method in dir(Runner) if not method.startswith('_')]}")
17
-
18
-
19
- class FitnessUIError(Exception):
20
- """Custom exception for UI-related errors"""
21
- pass
22
-
23
-
24
- class ConversationManager:
25
- """Manages conversation history and state for the fitness agent"""
26
-
27
- def __init__(self):
28
- self.conversation_history: List[Dict[str, str]] = []
29
- self.thread_id = "fitness_thread_001" # Could be made dynamic per session
30
-
31
- def add_user_message(self, content: str) -> None:
32
- """Add a user message to the conversation history"""
33
- self.conversation_history.append({"role": "user", "content": content})
34
-
35
- def get_input_for_agent(self) -> Union[str, List[Dict[str, str]]]:
36
- """Get the input format needed for the agent"""
37
- if not self.conversation_history:
38
- return "Hello"
39
- elif len(self.conversation_history) == 1:
40
- # First message - just send the content
41
- return self.conversation_history[0]["content"]
42
- else:
43
- # Multiple messages - send the full history
44
- return self.conversation_history
45
-
46
- def update_from_result(self, result) -> None:
47
- """Update conversation history from agent result"""
48
- if hasattr(result, 'to_input_list'):
49
- # Update our history with the complete conversation from the agent
50
- self.conversation_history = result.to_input_list()
51
- else:
52
- # Fallback: manually add the assistant response
53
- if hasattr(result, 'final_output'):
54
- response_content = result.final_output
55
- else:
56
- response_content = str(result)
57
-
58
- self.conversation_history.append({"role": "assistant", "content": str(response_content)})
59
-
60
- def clear_history(self) -> None:
61
- """Clear the conversation history"""
62
- self.conversation_history = []
63
-
64
- def get_history_summary(self) -> str:
65
- """Get a summary of the conversation for debugging"""
66
- return f"Conversation has {len(self.conversation_history)} messages"
67
-
68
-
69
- def get_or_create_agent(model_name: str = None) -> FitnessAgent:
70
- """
71
- Get the current agent or create a new one with the specified model
72
-
73
- Args:
74
- model_name: Name of the Anthropic model to use
75
-
76
- Returns:
77
- FitnessAgent instance
78
- """
79
- global current_agent, current_model
80
-
81
- # Use default if no model specified
82
- if model_name is None:
83
- model_name = current_model
84
-
85
- # Create new agent if model changed or no agent exists
86
- if current_agent is None or current_model != model_name:
87
- logger.info(f"Creating new agent with model: {model_name}")
88
- current_agent = FitnessAgent(model_name)
89
- current_model = model_name
90
-
91
- return current_agent
92
-
93
-
94
- def change_model(new_model: str) -> str:
95
- """
96
- Change the current model and reset the agent
97
-
98
- Args:
99
- new_model: New model to use
100
-
101
- Returns:
102
- Status message
103
- """
104
- global current_agent, current_model
105
-
106
- try:
107
- # Validate model exists in our supported list
108
- available_models = FitnessAgent.list_supported_models()
109
- is_valid, validation_message = FitnessAgent.validate_model_name(new_model)
110
-
111
- if not is_valid:
112
- return f"❌ **Model Validation Failed**\n\n{validation_message}"
113
-
114
- # Test if we can create an agent with this model (basic validation)
115
- try:
116
- logger.info(f"Attempting to create test agent with model: {new_model}")
117
- test_agent = FitnessAgent(new_model)
118
- logger.info(f"Successfully created test agent with model: {new_model}")
119
- # If we get here, the model is likely available
120
- except Exception as model_error:
121
- # Get the full error details for debugging
122
- error_str = str(model_error)
123
- error_repr = repr(model_error)
124
- error_type = type(model_error).__name__
125
-
126
- logger.error(f"Model initialization error for {new_model}:")
127
- logger.error(f" Error type: {error_type}")
128
- logger.error(f" Error string: {error_str}")
129
- logger.error(f" Error repr: {error_repr}")
130
-
131
- # Clean up error message for display
132
- clean_error = error_str.replace('\n', ' ').replace('\r', ' ')
133
- clean_error = clean_error[:400] + "..." if len(clean_error) > 400 else clean_error
134
-
135
- # Check for specific error types
136
- if "not_found_error" in error_str.lower() or "notfounderror" in error_str.lower():
137
- recommended = ", ".join(FitnessAgent.get_recommended_models())
138
- return f"""❌ **Model Not Available**
139
-
140
- 🚫 **Error:** Model `{new_model}` is not currently available on your API account.
141
-
142
- 💡 **This could mean:**
143
- - The model requires special access or higher tier subscription
144
- - The model has been deprecated
145
- - The model name is incorrect
146
-
147
- 🎯 **Try these recommended models instead:**
148
- {recommended}
149
-
150
- 🔧 **Current Model:** `{current_model}` (unchanged)"""
151
- elif "api" in error_str.lower() and "key" in error_str.lower():
152
- return f"""❌ **API Configuration Error**
153
-
154
- 🚫 **Error:** There seems to be an issue with your Anthropic API configuration.
155
-
156
- 💡 **Please check:**
157
- - Your ANTHROPIC_API_KEY environment variable is set
158
- - Your API key is valid and has the necessary permissions
159
- - Your account has access to the requested model
160
-
161
- 📝 **Error Details:** {clean_error}
162
-
163
- 🔧 **Current Model:** `{current_model}` (unchanged)
164
-
165
- 💡 **Tip:** Try using an OpenAI model if Anthropic models are not working."""
166
- else:
167
- return f"""❌ **Model Error**
168
-
169
- 🚫 **Error:** Failed to initialize model `{new_model}`
170
-
171
- 📝 **Error Type:** {error_type}
172
- 📝 **Details:** {clean_error}
173
-
174
- 🔧 **Current Model:** `{current_model}` (unchanged)
175
-
176
- 💡 **Debugging Info:**
177
- - Provider: {"Anthropic" if "claude" in new_model else "OpenAI" if any(x in new_model for x in ["gpt", "o1", "o3"]) else "Unknown"}
178
- - Try using a different model or check your API configuration"""
179
-
180
- # Reset agent to force recreation with new model
181
- current_agent = None
182
- current_model = new_model
183
-
184
- # Get model info for user feedback
185
- model_info = FitnessAgent.get_model_info(new_model)
186
-
187
- logger.info(f"Model changed to: {new_model}")
188
- return f"""✅ **Model Successfully Changed!**
189
-
190
- 🤖 **Current Model:** `{new_model}`
191
-
192
- 💡 **Description:** {model_info}
193
-
194
- 🔄 **Status:** Ready to chat with the new model. Your conversation history is preserved."""
195
-
196
- except Exception as e:
197
- logger.error(f"Error changing model: {str(e)}")
198
- return f"❌ **Unexpected Error:** {str(e)}"
199
-
200
-
201
- def filter_model_table(filter_choice: str) -> list:
202
- """Filter the model table based on user selection."""
203
- all_data = FitnessAgent.get_models_table_data()
204
-
205
- if filter_choice == "🔵 Anthropic Only":
206
- return [row for row in all_data if "🔵 Anthropic" in row[1]]
207
- elif filter_choice == "🟢 OpenAI Only":
208
- return [row for row in all_data if "🟢 OpenAI" in row[1]]
209
- elif filter_choice == "⭐ Recommended Only":
210
- return [row for row in all_data if row[0] == "⭐"]
211
- else: # All Models
212
- return all_data
213
-
214
-
215
- def select_model_from_table(table_data, evt: gr.SelectData):
216
- """Select a model from the table"""
217
- try:
218
- if evt is None:
219
- return "", "Please select a model from the table"
220
-
221
- # Get the selected row index
222
- row_index = evt.index[0] if evt.index else 0
223
-
224
- # Handle both DataFrame and list formats
225
- try:
226
- # Try pandas DataFrame access first
227
- if hasattr(table_data, 'iloc') and row_index < len(table_data):
228
- row = table_data.iloc[row_index]
229
- if len(row) >= 7:
230
- rating = row.iloc[0] # Recommendation star
231
- provider = row.iloc[1] # Provider
232
- selected_model = row.iloc[2] # Model name
233
- capability = row.iloc[3] # Capability rating
234
- speed = row.iloc[4] # Speed rating
235
- cost = row.iloc[5] # Cost rating
236
- description = row.iloc[6] # Description
237
- else:
238
- return "", "Invalid table row - insufficient columns"
239
- # Fall back to list access
240
- elif isinstance(table_data, list) and row_index < len(table_data) and len(table_data[row_index]) >= 7:
241
- rating = table_data[row_index][0] # Recommendation star
242
- provider = table_data[row_index][1] # Provider
243
- selected_model = table_data[row_index][2] # Model name
244
- capability = table_data[row_index][3] # Capability rating
245
- speed = table_data[row_index][4] # Speed rating
246
- cost = table_data[row_index][5] # Cost rating
247
- description = table_data[row_index][6] # Description
248
- else:
249
- return "", "Invalid selection - please try clicking on a model row"
250
-
251
- except (IndexError, KeyError) as data_error:
252
- logger.error(f"Data access error: {str(data_error)} - Table type: {type(table_data)}, Row index: {row_index}")
253
- return "", "Error accessing table data - please try again"
254
-
255
- # Update the model and get the change result
256
- change_result = change_model(selected_model)
257
-
258
- if "✅" in change_result:
259
- model_info = f"""✅ **Model Successfully Selected!**
260
-
261
- 🤖 **Current Model:** `{selected_model}`
262
- {provider}
263
- **Capability:** {capability} | **Speed:** {speed} | **Cost:** {cost}
264
-
265
- 💡 **Description:** {description}
266
-
267
- 📊 **Status:** Ready to chat with the new model!"""
268
- else:
269
- model_info = change_result # Show the error message
270
-
271
- return selected_model, model_info
272
-
273
- except Exception as e:
274
- logger.error(f"Error in select_model_from_table: {str(e)} - Table type: {type(table_data)}, Row index: {row_index if 'row_index' in locals() else 'unknown'}")
275
- return "", f"Error selecting model: {str(e)}"
276
-
277
-
278
- def update_model_and_display(selected_model: str) -> str:
279
- """
280
- Update both the model and the display when dropdown selection changes
281
-
282
- Args:
283
- selected_model: Selected model from dropdown
284
-
285
- Returns:
286
- Formatted model information
287
- """
288
- # Ignore separator selections
289
- separators = [
290
- "--- Anthropic Models ---",
291
- "--- OpenAI Models ---",
292
- "--- Other Models ---",
293
- "--- Legacy/Experimental ---"
294
- ]
295
- if selected_model in separators:
296
- return f"""⚠️ **Please select a specific model**
297
-
298
- The separator "{selected_model}" is not a valid model choice.
299
-
300
- Please choose one of the actual model names from the dropdown."""
301
-
302
- # Update the actual model
303
- change_result = change_model(selected_model)
304
-
305
- # If the change was successful, return success message, otherwise return the error
306
- if "✅" in change_result:
307
- try:
308
- model_info = FitnessAgent.get_model_info(selected_model)
309
-
310
- # Determine provider emoji
311
- provider_emoji = "🔵" if "claude" in selected_model else "🟢" if any(x in selected_model for x in ["gpt", "o1", "o3"]) else "⚪"
312
-
313
- return f"""{provider_emoji} **Current Model:** `{selected_model}`
314
-
315
- 💡 **Description:** {model_info}
316
-
317
- 📊 **Status:** Model updated and ready to chat!"""
318
- except Exception as e:
319
- return f"""🤖 **Current Model:** `{selected_model}`
320
-
321
- ❌ *Model information not available*
322
-
323
- 📊 **Status:** Ready to chat!"""
324
- else:
325
- # Return the error message from change_model
326
- return change_result
327
-
328
-
329
- def print_like_dislike(x: gr.LikeData) -> None:
330
- """Log user feedback on messages"""
331
- logger.info(f"User feedback - Index: {x.index}, Value: {x.value}, Liked: {x.liked}")
332
-
333
-
334
- # Global conversation manager instance
335
- conversation_manager = ConversationManager()
336
-
337
- # Global agent instance that can be updated with model changes
338
- current_agent = None
339
- current_model = "claude-3.5-haiku" # Updated default model
340
-
341
-
342
- def add_message(history: List[Dict], message: Dict) -> tuple:
343
- """
344
- Add user message to chat history with proper validation
345
-
346
- Args:
347
- history: Current Gradio chat history (for display)
348
- message: User message containing text and/or files
349
-
350
- Returns:
351
- Tuple of (updated_history, cleared_input)
352
- """
353
- try:
354
- user_content_parts = []
355
-
356
- # Handle file uploads
357
- if message.get("files"):
358
- for file_path in message["files"]:
359
- if file_path: # Validate file path exists
360
- file_content = f"[File uploaded: {file_path}]"
361
- user_content_parts.append(file_content)
362
- # Add to Gradio history for display
363
- history.append({
364
- "role": "user",
365
- "content": {"path": file_path}
366
- })
367
-
368
- # Handle text input
369
- if message.get("text") and message["text"].strip():
370
- text_content = message["text"].strip()
371
- user_content_parts.append(text_content)
372
- # Add to Gradio history for display
373
- history.append({
374
- "role": "user",
375
- "content": text_content
376
- })
377
-
378
- # Add to conversation manager (combine file and text content)
379
- if user_content_parts:
380
- combined_content = "\n".join(user_content_parts)
381
- conversation_manager.add_user_message(combined_content)
382
- logger.info(f"Added user message to conversation. {conversation_manager.get_history_summary()}")
383
-
384
- return history, gr.MultimodalTextbox(value=None, interactive=False)
385
-
386
- except Exception as e:
387
- logger.error(f"Error adding message: {str(e)}")
388
- # Add error message to history
389
- history.append({
390
- "role": "assistant",
391
- "content": "Sorry, there was an error processing your message. Please try again."
392
- })
393
- return history, gr.MultimodalTextbox(value=None, interactive=False)
394
-
395
-
396
- async def run_agent_with_streaming(agent: FitnessAgent, agent_input: Union[str, List[Dict[str, str]]]):
397
- """
398
- Run the agent with streaming support using the correct Runner.run_streamed API
399
-
400
- Args:
401
- agent: The fitness agent instance
402
- agent_input: Input for the agent (string for first message, list for conversation)
403
-
404
- Yields:
405
- Streaming response chunks from the agent with content and final result
406
- """
407
- try:
408
- logger.info(f"Running agent with streaming. Input type: {type(agent_input)}")
409
-
410
- # Use the correct streaming API
411
- result = Runner.run_streamed(agent, agent_input)
412
-
413
- accumulated_content = ""
414
- final_result = None
415
- has_content = False
416
-
417
- try:
418
- async for event in result.stream_events():
419
- # Skip raw response events as suggested in the example
420
- if event.type == "raw_response_event":
421
- continue
422
-
423
- # Handle different event types
424
- elif event.type == "agent_updated_stream_event":
425
- logger.debug(f"Agent updated: {event.new_agent.name}")
426
- continue
427
-
428
- elif event.type == "run_item_stream_event":
429
- if event.item.type == "tool_call_item":
430
- logger.debug("Tool was called")
431
-
432
- elif event.item.type == "tool_call_output_item":
433
- logger.debug(f"Tool output: {event.item.output}")
434
-
435
- elif event.item.type == "message_output_item":
436
- # This is where the actual message content comes from
437
- try:
438
- message_content = ItemHelpers.text_message_output(event.item)
439
- if message_content:
440
- accumulated_content = message_content
441
- has_content = True
442
- # Yield a chunk-like object for streaming display
443
- yield {
444
- 'type': 'content_chunk',
445
- 'content': message_content,
446
- 'accumulated': accumulated_content
447
- }
448
- except Exception as item_error:
449
- logger.warning(f"Error extracting message content: {item_error}")
450
- # Continue processing other events
451
- continue
452
-
453
- except Exception as streaming_error:
454
- # Check if this is the specific Pydantic validation error for Anthropic models
455
- error_str = str(streaming_error)
456
- if "validation error for ResponseTextDeltaEvent" in error_str and "logprobs" in error_str:
457
- logger.warning("Detected Anthropic model streaming validation error, falling back to non-streaming mode")
458
-
459
- # Fall back to non-streaming execution
460
- try:
461
- fallback_result = await Runner.run(agent, agent_input)
462
- final_result = fallback_result
463
-
464
- # Extract content from fallback result
465
- if hasattr(fallback_result, 'final_output'):
466
- accumulated_content = str(fallback_result.final_output)
467
- else:
468
- accumulated_content = str(fallback_result)
469
-
470
- has_content = True
471
-
472
- # Yield the content as if it was streamed
473
- yield {
474
- 'type': 'content_chunk',
475
- 'content': accumulated_content,
476
- 'accumulated': accumulated_content
477
- }
478
-
479
- except Exception as fallback_error:
480
- logger.error(f"Fallback execution also failed: {fallback_error}")
481
- raise streaming_error # Re-raise original error
482
- else:
483
- # Re-raise if it's a different type of error
484
- raise streaming_error
485
-
486
- # Get the final result if we haven't already from fallback
487
- if final_result is None:
488
- try:
489
- final_result = await result.get_final_result()
490
- except Exception as final_error:
491
- logger.warning(f"Error getting final result: {final_error}")
492
- # Create a mock final result if we have content
493
- if has_content:
494
- class MockResult:
495
- def __init__(self, content):
496
- self.final_output = content
497
- def to_input_list(self):
498
- return [{"role": "assistant", "content": self.final_output}]
499
-
500
- final_result = MockResult(accumulated_content)
501
-
502
- # Yield the final result for conversation management
503
- yield {
504
- 'type': 'final_result',
505
- 'result': final_result,
506
- 'content': accumulated_content
507
- }
508
-
509
- except Exception as e:
510
- logger.error(f"Agent streaming error: {str(e)}")
511
- # Return error as a final result-like object
512
- class ErrorResult:
513
- def __init__(self, error_message):
514
- self.final_output = error_message
515
-
516
- def to_input_list(self):
517
- return []
518
-
519
- yield {
520
- 'type': 'error',
521
- 'result': ErrorResult(f"Sorry, I encountered an error while processing your request: {str(e)}"),
522
- 'content': f"Sorry, I encountered an error while processing your request: {str(e)}"
523
- }
524
-
525
-
526
- def run_agent_safely_sync(agent: FitnessAgent, agent_input: Union[str, List[Dict[str, str]]]) -> Any:
527
- """
528
- Synchronous wrapper for the agent execution - now using proper Runner.run method
529
-
530
- Args:
531
- agent: The fitness agent instance
532
- agent_input: Input for the agent (string for first message, list for conversation)
533
-
534
- Returns:
535
- Final agent result
536
- """
537
- def _run_agent():
538
- try:
539
- loop = asyncio.new_event_loop()
540
- asyncio.set_event_loop(loop)
541
- try:
542
- logger.info(f"Running agent sync with input type: {type(agent_input)}")
543
-
544
- # Use the correct async method
545
- async def run_async():
546
- return await Runner.run(agent, agent_input)
547
-
548
- result = loop.run_until_complete(run_async())
549
- return result
550
- finally:
551
- loop.close()
552
- except Exception as e:
553
- logger.error(f"Agent execution error: {str(e)}")
554
-
555
- # Create a mock result object for error cases
556
- class ErrorResult:
557
- def __init__(self, error_message):
558
- self.final_output = error_message
559
-
560
- def to_input_list(self):
561
- return []
562
-
563
- return ErrorResult(f"Sorry, I encountered an error while processing your request: {str(e)}")
564
-
565
- try:
566
- with ThreadPoolExecutor(max_workers=1) as executor:
567
- future = executor.submit(_run_agent)
568
- # Add timeout to prevent hanging
569
- return future.result(timeout=60) # 60 second timeout
570
- except Exception as e:
571
- logger.error(f"Executor error: {str(e)}")
572
-
573
- # Create a mock result object for error cases
574
- class ErrorResult:
575
- def __init__(self, error_message):
576
- self.final_output = error_message
577
-
578
- def to_input_list(self):
579
- return []
580
-
581
- return ErrorResult("Sorry, I'm having trouble processing your request right now. Please try again.")
582
-
583
-
584
- def parse_fitness_plan_from_string(plan_str: str) -> str:
585
- """
586
- Parse a fitness plan from its string representation
587
-
588
- Args:
589
- plan_str: String representation of a fitness plan object
590
-
591
- Returns:
592
- Formatted markdown string
593
- """
594
- try:
595
- import re
596
-
597
- # Extract name - handle both single and double quotes
598
- name_match = re.search(r"name=['\"]([^'\"]*)['\"]", plan_str)
599
- name = name_match.group(1) if name_match else "Fitness Plan"
600
-
601
- # Extract training plan - handle both list format and simple string format
602
- training_plan = ""
603
-
604
- # Try list format first (with brackets and quotes)
605
- training_match = re.search(r"training_plan=['\"](\[.*?\])['\"]", plan_str, re.DOTALL)
606
- if training_match:
607
- training_raw = training_match.group(1)
608
- # Clean up the training plan format
609
- training_items = re.findall(r'"([^"]*)"', training_raw)
610
- training_plan = "\n".join(f"• {item.strip()}" for item in training_items if item.strip())
611
- else:
612
- # Try simple string format
613
- training_match = re.search(r"training_plan=['\"]([^'\"]*)['\"]", plan_str)
614
- if training_match:
615
- training_raw = training_match.group(1)
616
- # Split by common delimiters and format as bullet points
617
- if ',' in training_raw:
618
- training_items = [item.strip() for item in training_raw.split(',')]
619
- training_plan = "\n".join(f"• {item}" for item in training_items if item)
620
- else:
621
- training_plan = f"• {training_raw}"
622
-
623
- # Extract meal plan - handle both list format and simple string format
624
- meal_plan = ""
625
-
626
- # Try list format first (with brackets and quotes)
627
- meal_match = re.search(r"meal_plan=['\"](\[.*?\])['\"]", plan_str, re.DOTALL)
628
- if meal_match:
629
- meal_raw = meal_match.group(1)
630
- # Clean up the meal plan format
631
- meal_items = re.findall(r'"([^"]*)"', meal_raw)
632
- meal_plan = "\n".join(f"• {item.strip()}" for item in meal_items if item.strip())
633
- else:
634
- # Try simple string format
635
- meal_match = re.search(r"meal_plan=['\"]([^'\"]*)['\"]", plan_str)
636
- if meal_match:
637
- meal_raw = meal_match.group(1)
638
- # Handle multi-line format with dashes or bullet points
639
- if '\n-' in meal_raw or '\n•' in meal_raw:
640
- # Already has bullet points, just clean up
641
- meal_plan = meal_raw.strip()
642
- elif ',' in meal_raw:
643
- # Split by commas and format as bullet points
644
- meal_items = [item.strip() for item in meal_raw.split(',')]
645
- meal_plan = "\n".join(f"• {item}" for item in meal_items if item)
646
- else:
647
- meal_plan = f"• {meal_raw}"
648
-
649
- # Format as markdown
650
- formatted_plan = f"""# 🏋️ {name}
651
-
652
- ## 💪 Training Plan
653
- {training_plan}
654
-
655
- ## 🥗 Meal Plan
656
- {meal_plan}
657
-
658
- ---
659
- *Your personalized fitness plan is ready! Feel free to ask any questions about the plan or request modifications.*"""
660
-
661
- return formatted_plan
662
-
663
- except Exception as e:
664
- logger.error(f"Error parsing fitness plan from string: {str(e)}")
665
- # Fallback to basic formatting
666
- return f"**Fitness Plan**\n\n{plan_str}"
667
-
668
-
669
- def format_fitness_plan(plan_obj: Any, style: str = "default") -> str:
670
- """
671
- Format a FitnessPlan object into a structured markdown string
672
-
673
- Args:
674
- plan_obj: The fitness plan object
675
- style: Formatting style ("default", "minimal", "detailed")
676
-
677
- Returns:
678
- Formatted markdown string
679
- """
680
- try:
681
- if not (hasattr(plan_obj, 'name') and
682
- hasattr(plan_obj, 'training_plan') and
683
- hasattr(plan_obj, 'meal_plan')):
684
- return str(plan_obj)
685
-
686
- if style == "minimal":
687
- return f"""**{plan_obj.name}**
688
-
689
- **Training:** {plan_obj.training_plan}
690
-
691
- **Meals:** {plan_obj.meal_plan}"""
692
-
693
- elif style == "detailed":
694
- return f"""# 🏋️ {plan_obj.name}
695
-
696
- ## 💪 Training Plan
697
- {plan_obj.training_plan}
698
-
699
- ## 🥗 Meal Plan
700
- {plan_obj.meal_plan}
701
-
702
- ## 📝 Additional Notes
703
- - Follow the plan consistently for best results
704
- - Adjust portions based on your energy levels
705
- - Stay hydrated throughout your workouts
706
- - Rest days are important for recovery
707
-
708
- ---
709
- *Your personalized fitness plan is ready! Feel free to ask any questions about the plan or request modifications.*"""
710
-
711
- else: # default
712
- return f"""# 🏋️ {plan_obj.name}
713
-
714
- ## 💪 Training Plan
715
- {plan_obj.training_plan}
716
-
717
- ## 🥗 Meal Plan
718
- {plan_obj.meal_plan}
719
-
720
- ---
721
- *Your personalized fitness plan is ready! Feel free to ask any questions about the plan or request modifications.*"""
722
-
723
- except Exception as e:
724
- logger.error(f"Error formatting fitness plan: {str(e)}")
725
- return str(plan_obj)
726
-
727
-
728
- def extract_response_content(result: Any) -> str:
729
- """
730
- Extract content from agent response with proper error handling
731
-
732
- Args:
733
- result: Agent response object
734
-
735
- Returns:
736
- Formatted response string
737
- """
738
- try:
739
- # Handle different response types
740
- if hasattr(result, 'final_output'):
741
- response_data = result.final_output
742
- else:
743
- response_data = result
744
-
745
- # Check if this is a structured FitnessPlan output
746
- if (hasattr(response_data, 'name') and
747
- hasattr(response_data, 'training_plan') and
748
- hasattr(response_data, 'meal_plan')):
749
- logger.info(f"Detected fitness plan object: {response_data.name}")
750
- return format_fitness_plan(response_data)
751
-
752
- # Check if the response_data is a string representation of a fitness plan
753
- response_str = str(response_data)
754
- if ("name=" in response_str and "training_plan=" in response_str and "meal_plan=" in response_str):
755
- logger.info("Detected fitness plan in string format, attempting to parse")
756
- return parse_fitness_plan_from_string(response_str)
757
-
758
- elif isinstance(response_data, str):
759
- return response_data
760
- else:
761
- return str(response_data)
762
-
763
- except Exception as e:
764
- logger.error(f"Error extracting response content: {str(e)}")
765
- return "Sorry, I had trouble formatting my response. Please try asking again."
766
-
767
-
768
- def stream_response(response: str, history: List[Dict], chunk_size: int = 3) -> Generator[List[Dict], None, None]:
769
- """
770
- Stream response text with configurable chunk size for better UX
771
-
772
- Args:
773
- response: Response text to stream
774
- history: Current chat history
775
- chunk_size: Number of characters per chunk
776
-
777
- Yields:
778
- Updated history with streaming response
779
- """
780
- try:
781
- history.append({"role": "assistant", "content": ""})
782
-
783
- # Stream in chunks rather than character by character for better performance
784
- for i in range(0, len(response), chunk_size):
785
- chunk = response[i:i + chunk_size]
786
- history[-1]["content"] += chunk
787
- time.sleep(0.01) # Faster streaming
788
- yield history
789
-
790
- except Exception as e:
791
- logger.error(f"Error streaming response: {str(e)}")
792
- # Fallback to showing full response
793
- history[-1]["content"] = response
794
- yield history
795
-
796
-
797
- def bot_with_real_streaming(history: List[Dict], model_name: str = None) -> Generator[List[Dict], None, None]:
798
- """
799
- Bot function with real-time streaming from the agent using Runner.run_streamed
800
-
801
- Args:
802
- history: Current Gradio chat history (for display only)
803
- model_name: Model to use for the agent
804
-
805
- Yields:
806
- Updated history with real-time streaming response
807
- """
808
- try:
809
- # Get agent instance with specified model
810
- agent = get_or_create_agent(model_name)
811
-
812
- # Get input for agent from conversation manager
813
- agent_input = conversation_manager.get_input_for_agent()
814
- logger.info(f"Sending to agent ({current_model}): {type(agent_input)} - {conversation_manager.get_history_summary()}")
815
-
816
- # Add empty assistant message for streaming
817
- history.append({"role": "assistant", "content": ""})
818
-
819
- def _run_streaming():
820
- """Synchronous wrapper for streaming execution with Anthropic fallback"""
821
- try:
822
- loop = asyncio.new_event_loop()
823
- asyncio.set_event_loop(loop)
824
- try:
825
- async def collect_streaming():
826
- """Collect results from the streaming agent with fallback handling"""
827
- content_chunks = []
828
- final_result = None
829
- streaming_worked = False
830
-
831
- try:
832
- async for chunk in run_agent_with_streaming(agent, agent_input):
833
- streaming_worked = True
834
- if chunk['type'] == 'content_chunk':
835
- # Real-time content update
836
- content_chunks.append(chunk['accumulated'])
837
- elif chunk['type'] == 'final_result':
838
- # Final result for conversation management
839
- final_result = chunk['result']
840
- # Ensure we have the final content
841
- if chunk['content'] and chunk['content'] not in content_chunks:
842
- content_chunks.append(chunk['content'])
843
- elif chunk['type'] == 'error':
844
- # Error handling
845
- final_result = chunk['result']
846
- content_chunks.append(chunk['content'])
847
-
848
- except Exception as stream_error:
849
- logger.warning(f"Streaming failed completely: {stream_error}")
850
- # Ultimate fallback - use the sync method
851
- if not streaming_worked:
852
- logger.info("Attempting sync fallback for Anthropic compatibility")
853
- try:
854
- sync_result = await Runner.run(agent, agent_input)
855
- final_result = sync_result
856
-
857
- # Extract content
858
- if hasattr(sync_result, 'final_output'):
859
- content = str(sync_result.final_output)
860
- else:
861
- content = str(sync_result)
862
-
863
- content_chunks.append(content)
864
-
865
- except Exception as sync_error:
866
- logger.error(f"Both streaming and sync execution failed: {sync_error}")
867
- content_chunks.append(f"Sorry, I encountered an error: {str(sync_error)}")
868
-
869
- # Update conversation manager with final result
870
- if final_result:
871
- conversation_manager.update_from_result(final_result)
872
- logger.info(f"Updated conversation manager. {conversation_manager.get_history_summary()}")
873
-
874
- # Process content chunks through extract_response_content for proper formatting
875
- processed_chunks = []
876
- for content in content_chunks:
877
- # Create a mock result object to use extract_response_content
878
- class MockContentResult:
879
- def __init__(self, content):
880
- self.final_output = content
881
-
882
- mock_result = MockContentResult(content)
883
- formatted_content = extract_response_content(mock_result)
884
- processed_chunks.append(formatted_content)
885
-
886
- return processed_chunks
887
-
888
- return loop.run_until_complete(collect_streaming())
889
-
890
- finally:
891
- loop.close()
892
- except Exception as e:
893
- logger.error(f"Streaming execution error: {str(e)}")
894
- return [f"Sorry, I encountered an error: {str(e)}"]
895
-
896
- # Execute streaming and yield updates
897
- try:
898
- with ThreadPoolExecutor(max_workers=1) as executor:
899
- future = executor.submit(_run_streaming)
900
- streaming_results = future.result(timeout=120) # Increased timeout for fallback
901
-
902
- # Stream the content updates to the UI
903
- if streaming_results:
904
- for i, content in enumerate(streaming_results):
905
- history[-1]["content"] = content
906
- yield history
907
- # Add a small delay between updates for visual effect
908
- if i < len(streaming_results) - 1: # Don't delay on the last update
909
- time.sleep(0.1)
910
- else:
911
- # No content received
912
- history[-1]["content"] = "I apologize, but I didn't receive a response. Please try again."
913
- yield history
914
-
915
- except Exception as e:
916
- logger.error(f"Error in streaming execution: {str(e)}")
917
- history[-1]["content"] = "Sorry, I had trouble processing your request."
918
- yield history
919
-
920
- except Exception as e:
921
- logger.error(f"Bot streaming function error: {str(e)}")
922
- if len(history) == 0 or history[-1].get("role") != "assistant":
923
- history.append({"role": "assistant", "content": ""})
924
- history[-1]["content"] = "I apologize, but I'm experiencing technical difficulties. Please try again in a moment."
925
- yield history
926
-
927
-
928
- def bot(history: List[Dict], model_name: str = None) -> Generator[List[Dict], None, None]:
929
- """
930
- Main bot function with comprehensive error handling and improved UX using manual conversation management
931
-
932
- Args:
933
- history: Current Gradio chat history (for display only)
934
- model_name: Model to use for the agent
935
-
936
- Yields:
937
- Updated history with bot response
938
- """
939
- try:
940
- # Get agent instance with specified model
941
- agent = get_or_create_agent(model_name)
942
-
943
- # Get input for agent from conversation manager
944
- agent_input = conversation_manager.get_input_for_agent()
945
- logger.info(f"Sending to agent ({current_model}): {type(agent_input)} - {conversation_manager.get_history_summary()}")
946
-
947
- # Run agent safely with sync wrapper
948
- result = run_agent_safely_sync(agent, agent_input)
949
-
950
- # Update conversation manager with the result
951
- conversation_manager.update_from_result(result)
952
- logger.info(f"Updated conversation manager. {conversation_manager.get_history_summary()}")
953
-
954
- # Extract and format response for display
955
- response = extract_response_content(result)
956
-
957
- # Stream the response
958
- yield from stream_response(response, history)
959
-
960
- except Exception as e:
961
- logger.error(f"Bot function error: {str(e)}")
962
- error_response = "I apologize, but I'm experiencing technical difficulties. Please try again in a moment."
963
- yield from stream_response(error_response, history)
964
-
965
-
966
- def dynamic_bot(history: List[Dict], use_real_streaming: bool = True, model_name: str = None) -> Generator[List[Dict], None, None]:
967
- """
968
- Dynamic bot function that can switch between streaming modes
969
-
970
- Args:
971
- history: Current Gradio chat history (for display only)
972
- use_real_streaming: Whether to use real-time streaming from agent
973
- model_name: Model to use for the agent
974
-
975
- Yields:
976
- Updated history with bot response
977
- """
978
- if use_real_streaming:
979
- logger.info("Using real-time streaming mode")
980
- yield from bot_with_real_streaming(history, model_name)
981
- else:
982
- logger.info("Using simulated streaming mode")
983
- yield from bot(history, model_name)
984
-
985
-
986
- def clear_conversation() -> List[Dict]:
987
- """
988
- Clear the conversation history
989
-
990
- Returns:
991
- Empty chat history
992
- """
993
- global conversation_manager
994
- conversation_manager.clear_history()
995
- logger.info("Conversation history cleared")
996
- return []
997
-
998
-
999
- # Gradio Interface
1000
- with gr.Blocks(
1001
- theme=gr.themes.Soft(),
1002
- title="Fitness AI Assistant",
1003
- css="""
1004
- .gradio-container {
1005
- max-width: 1200px !important;
1006
- }
1007
- #chatbot {
1008
- height: 600px;
1009
- }
1010
- .model-info {
1011
- background: linear-gradient(135deg, rgba(55, 65, 81, 0.9), rgba(75, 85, 99, 0.7)) !important;
1012
- color: #e5e7eb !important;
1013
- padding: 16px !important;
1014
- border-radius: 12px !important;
1015
- border-left: 4px solid #10b981 !important;
1016
- margin: 12px 0 !important;
1017
- border: 1px solid rgba(75, 85, 99, 0.4) !important;
1018
- box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
1019
- backdrop-filter: blur(10px) !important;
1020
- }
1021
- .model-info p {
1022
- color: #e5e7eb !important;
1023
- margin: 8px 0 !important;
1024
- line-height: 1.5 !important;
1025
- }
1026
- .model-info strong {
1027
- color: #f9fafb !important;
1028
- font-weight: 600 !important;
1029
- }
1030
- .model-info em {
1031
- color: #d1d5db !important;
1032
- font-style: italic;
1033
- }
1034
- .model-info code {
1035
- background-color: rgba(31, 41, 55, 0.8) !important;
1036
- color: #10b981 !important;
1037
- padding: 2px 6px !important;
1038
- border-radius: 4px !important;
1039
- font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
1040
- font-size: 0.9em !important;
1041
- }
1042
- .model-dropdown {
1043
- font-weight: bold;
1044
- }
1045
- /* Ensure all text in model-info respects dark theme */
1046
- .model-info * {
1047
- color: inherit !important;
1048
- }
1049
- /* Fix for any remaining white background issues */
1050
- .model-info .prose {
1051
- color: #e5e7eb !important;
1052
- }
1053
- """
1054
- ) as demo:
1055
-
1056
- gr.Markdown("""
1057
- # 🏋️‍♀️ Fitness AI Assistant
1058
- Your personal fitness companion for workout plans, meal planning, and fitness guidance!
1059
-
1060
- 💡 **Tips:**
1061
- - Be specific about your fitness goals
1062
- - Mention any physical limitations or preferences
1063
- - Ask for modifications if needed
1064
- - Choose your preferred AI model for different capabilities
1065
- """)
1066
-
1067
- # Model selection section
1068
- with gr.Row():
1069
- with gr.Column():
1070
- gr.Markdown("### 🤖 AI Model Selection")
1071
- gr.Markdown("Browse and select your preferred AI model. Click on a row to select it.")
1072
-
1073
- # Create model table data
1074
- table_data = FitnessAgent.get_models_table_data()
1075
-
1076
- model_table = gr.DataFrame(
1077
- value=table_data,
1078
- headers=["⭐", "Provider", "Model Name", "Capability", "Speed", "Cost", "Description"],
1079
- datatype=["str", "str", "str", "str", "str", "str", "str"],
1080
- interactive=False,
1081
- wrap=True,
1082
- elem_classes=["model-table"]
1083
- )
1084
-
1085
- # Hidden components to manage selection
1086
- selected_model = gr.Textbox(
1087
- value="claude-3.5-haiku",
1088
- visible=False,
1089
- label="Selected Model"
1090
- )
1091
-
1092
- # Model selection button
1093
- with gr.Row():
1094
- model_filter = gr.Dropdown(
1095
- choices=["All Models", "🔵 Anthropic Only", "🟢 OpenAI Only", "⭐ Recommended Only"],
1096
- value="All Models",
1097
- label="Filter Models",
1098
- scale=3
1099
- )
1100
-
1101
- # Model information display
1102
- model_info_display = gr.Markdown(
1103
- value=f"""🔵 **Current Model:** `claude-3.5-haiku`
1104
-
1105
- 💡 **Description:** {FitnessAgent.get_model_info('claude-3.5-haiku')}
1106
-
1107
- 📊 **Status:** Ready to chat!""",
1108
- visible=True,
1109
- elem_classes=["model-info"]
1110
- )
1111
-
1112
- chatbot = gr.Chatbot(
1113
- elem_id="chatbot",
1114
- type="messages",
1115
- show_copy_button=True,
1116
- show_share_button=False,
1117
- avatar_images=None,
1118
- sanitize_html=True,
1119
- render_markdown=True
1120
  )
1121
 
1122
- chat_input = gr.MultimodalTextbox(
1123
- interactive=True,
1124
- file_count="multiple",
1125
- placeholder="Ask me about fitness, request a workout plan, or get meal planning advice...",
1126
- show_label=False,
1127
- sources=["microphone", "upload"],
1128
- )
1129
-
1130
- # Add clear conversation button and streaming toggle
1131
- with gr.Row():
1132
- clear_btn = gr.Button("🗑️ Clear Conversation", variant="secondary", size="sm")
1133
- streaming_toggle = gr.Checkbox(
1134
- label="🚀 Enable Real-time Streaming",
1135
- value=True,
1136
- info="Stream responses in real-time as the agent generates them"
1137
- )
1138
-
1139
- # Add example buttons for common requests
1140
- with gr.Row():
1141
- gr.Examples(
1142
- examples=[
1143
- "Create a beginner workout plan for me",
1144
- "I want to lose weight - help me with a fitness plan",
1145
- "Design a muscle building program for intermediate level",
1146
- "I need a meal plan for gaining muscle mass",
1147
- "What exercises should I do for better cardiovascular health?",
1148
- "Help me with a home workout routine with no equipment"
1149
- ],
1150
- inputs=chat_input,
1151
- label="💡 Try asking:"
1152
- )
1153
-
1154
- # Add helpful information
1155
- with gr.Accordion("ℹ️ How to use this assistant", open=False):
1156
- gr.Markdown("""
1157
- **What I can help you with:**
1158
- - Create personalized workout plans
1159
- - Design meal plans for your goals
1160
- - Provide fitness guidance and tips
1161
- - Suggest exercises for specific needs
1162
- - Help modify existing plans
1163
-
1164
- **To get the best results:**
1165
- - Tell me your fitness level (beginner, intermediate, advanced)
1166
- - Mention your goals (weight loss, muscle gain, general fitness)
1167
- - Include any equipment you have access to
1168
- - Let me know about any injuries or limitations
1169
-
1170
- **AI Model Selection:**
1171
- - **🔵 Anthropic Claude Models**: Excellent for detailed reasoning and analysis
1172
- - Claude-4: Most capable (premium), Claude-3.7: Extended thinking
1173
- - Claude-3.5: Balanced performance, Claude-3: Fast and cost-effective
1174
- - **🟢 OpenAI GPT Models**: Great for general tasks and familiar interface
1175
- - GPT-4o: Latest with vision, GPT-4 Turbo: Large context window
1176
- - GPT-3.5: Fast and economical, o1/o3: Advanced reasoning
1177
- - You can change models anytime - the conversation continues seamlessly
1178
- - Mix and match providers based on your preferences
1179
-
1180
- **Conversation Management:**
1181
- - The assistant remembers our entire conversation
1182
- - You can refer back to previous plans or discussions
1183
- - Use the "Clear Conversation" button to start fresh
1184
- - Each conversation maintains context across multiple exchanges
1185
-
1186
- **Streaming Options:**
1187
- - **Real-time Streaming**: Responses appear as the AI generates them using `Runner.run_streamed()` (most engaging)
1188
- - **Simulated Streaming**: Responses are generated fully, then displayed with typing effect (more reliable)
1189
- - Toggle the streaming mode using the checkbox above
1190
- - Real-time streaming shows tool calls, outputs, and message generation in real-time
1191
- - **Note**: Anthropic models automatically fall back to non-streaming if validation errors occur
1192
- """)
1193
-
1194
- # Add model comparison section
1195
- with gr.Accordion("🤖 Model Comparison Guide", open=False):
1196
- gr.Markdown("""
1197
- ## 🔵 Anthropic Claude Models
1198
-
1199
- | Model | Capability | Speed | Cost | Best For |
1200
- |-------|------------|--------|------|----------|
1201
- | claude-4-opus | ★★★★★ | ★★★��☆ | ★★★★★ | Complex analysis, detailed plans |
1202
- | claude-4-sonnet | ★★★★☆ | ★★★★☆ | ★★★★☆ | Balanced high performance |
1203
- | claude-3.7-sonnet | ★★★★☆ | ★★★★☆ | ★★★☆☆ | Extended thinking, complex tasks |
1204
- | claude-3.5-sonnet | ★★★★☆ | ★★★★☆ | ★★★☆☆ | General use, balanced |
1205
- | claude-3.5-haiku | ★★★☆☆ | ★★★★★ | ★★☆☆☆ | **DEFAULT** - Fast responses |
1206
- | claude-3-haiku | ★★★☆☆ | ★★★★★ | ★☆☆☆☆ | Most cost-effective |
1207
-
1208
- ## 🟢 OpenAI GPT Models
1209
-
1210
- | Model | Capability | Speed | Cost | Best For |
1211
- |-------|------------|--------|------|----------|
1212
- | gpt-4o | ★★★★★ | ★★★★☆ | ★★★★☆ | Latest features, vision support |
1213
- | gpt-4o-mini | ★★★★☆ | ★★★★★ | ★★☆☆☆ | Balanced performance, affordable |
1214
- | gpt-4-turbo | ★★★★☆ | ★★★★☆ | ★★★★☆ | Large context, reliable |
1215
- | gpt-3.5-turbo | ★★★☆☆ | ★★★★★ | ★☆☆☆☆ | Fast and economical |
1216
- | o1-preview | ★★★★★ | ★★☆☆☆ | ★★★★★ | Advanced reasoning |
1217
- | o1-mini | ★★★★☆ | ★★★☆☆ | ★★★☆☆ | Reasoning tasks |
1218
- | o3-mini | ★★★★☆ | ★★★★☆ | ★★★☆☆ | Latest reasoning model |
1219
-
1220
- ### 💡 Provider Comparison
1221
- - **🔵 Anthropic**: Excellent for detailed analysis, safety-focused, great for complex fitness planning
1222
- - **🟢 OpenAI**: Familiar interface, good general performance, strong tool usage
1223
-
1224
- ### 🎯 Recommendations by Use Case
1225
- - **Quick questions**: claude-3.5-haiku, gpt-4o-mini, gpt-3.5-turbo
1226
- - **Comprehensive plans**: claude-3.5-sonnet, gpt-4o, claude-3.7-sonnet
1227
- - **Complex analysis**: claude-4-opus, gpt-4o, o1-preview
1228
- - **Budget-conscious**: claude-3-haiku, gpt-3.5-turbo, gpt-4o-mini
1229
- """)
1230
-
1231
- # Event handlers
1232
- chat_msg = chat_input.submit(
1233
- add_message, [chatbot, chat_input], [chatbot, chat_input]
1234
- )
1235
- bot_msg = chat_msg.then(
1236
- dynamic_bot,
1237
- [chatbot, streaming_toggle, selected_model],
1238
- chatbot,
1239
- api_name="bot_response"
1240
- )
1241
- bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
1242
-
1243
- # Model table filtering
1244
- model_filter.change(
1245
- filter_model_table,
1246
- inputs=[model_filter],
1247
- outputs=[model_table]
1248
- )
1249
-
1250
- # Model selection from table
1251
- model_table.select(
1252
- select_model_from_table,
1253
- inputs=[model_table],
1254
- outputs=[selected_model, model_info_display]
1255
- )
1256
-
1257
- # Clear conversation handler
1258
- clear_btn.click(clear_conversation, None, chatbot)
1259
-
1260
- chatbot.like(print_like_dislike, None, None, like_user_message=True)
1261
-
1262
-
1263
  if __name__ == "__main__":
1264
- demo.launch(
 
1265
  server_name="0.0.0.0",
1266
  server_port=7860,
1267
- show_error=True,
1268
- debug=False
1269
  )
 
1
+ """
2
+ Main Gradio app entry point for Hugging Face Spaces
3
+ """
4
+ import os
5
+ import sys
6
+ from pathlib import Path
7
+
8
+ # Add the necessary paths
9
+ current_dir = Path(__file__).parent
10
+ root_dir = current_dir.parent
11
+
12
+ # Add shared library to path
13
+ shared_path = root_dir / "shared" / "src"
14
+ if str(shared_path) not in sys.path:
15
+ sys.path.insert(0, str(shared_path))
16
+
17
+ # Add gradio app to path
18
+ gradio_app_path = root_dir / "apps" / "gradio-app" / "src"
19
+ if str(gradio_app_path) not in sys.path:
20
+ sys.path.insert(0, str(gradio_app_path))
21
+
22
+ # Import required modules
23
  import gradio as gr
 
 
 
 
 
 
 
 
24
 
25
+ try:
26
+ # Try to import the main Gradio UI
27
+ from fitness_gradio.ui import create_fitness_app
28
+ from fitness_core import setup_logging, Config, get_logger
29
+
30
+ # Configure logging
31
+ setup_logging(level=Config.LOG_LEVEL, log_file=Config.LOG_FILE)
32
+ logger = get_logger(__name__)
33
+
34
+ # Create the main app
35
+ app = create_fitness_app()
36
+
37
+ except ImportError as e:
38
+ print(f"Warning: Could not import fitness modules: {e}")
39
+ print("Creating fallback Gradio interface...")
40
+
41
+ def respond(message, history):
42
+ """Fallback response function."""
43
+ return "I'm a fitness AI assistant. I'm currently loading my capabilities. Please try again in a moment, or ensure all dependencies are properly installed."
44
+
45
+ # Create fallback interface
46
+ app = gr.ChatInterface(
47
+ respond,
48
+ title="🏋️ Fitness AI Assistant",
49
+ description="Your personal AI-powered fitness and nutrition coach. I can help with workout plans, nutrition advice, and health guidance.",
50
+ examples=[
51
+ "Create a beginner workout plan for me",
52
+ "What should I eat for muscle gain?",
53
+ "How can I lose weight safely?",
54
+ "Design a 30-minute home workout"
55
+ ],
56
+ cache_examples=True,
57
+ theme=gr.themes.Soft()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  )
59
 
60
+ # For Hugging Face Spaces
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  if __name__ == "__main__":
62
+ # Launch the app
63
+ app.launch(
64
  server_name="0.0.0.0",
65
  server_port=7860,
66
+ share=False,
67
+ show_error=True
68
  )
fitness_agent/fitness_agent.py CHANGED
@@ -1,394 +1,87 @@
1
- from pydantic import BaseModel
2
- from agents import Agent, Runner
3
- from dotenv import load_dotenv
 
4
  import os
5
- from typing import Optional
6
-
7
- load_dotenv()
8
-
9
-
10
- class FitnessPlan(BaseModel):
11
- name: str
12
- training_plan: str
13
- meal_plan: str
14
-
15
-
16
- class FitnessAgent(Agent):
17
- """
18
- A helpful assistant for general fitness guidance and handoffs to a plan-building agent.
19
-
20
- Supports multiple AI providers via LiteLLM (as of January 2025):
 
 
 
 
 
21
 
22
- Anthropic models:
23
- - Claude-4: claude-opus-4-20250514, claude-sonnet-4-20250514 (Premium)
24
- - Claude-3.7: claude-3-7-sonnet-20250219 (Extended thinking)
25
- - Claude-3.5: claude-3-5-sonnet-20241022 (latest), claude-3-5-sonnet-20240620 (stable), claude-3-5-haiku-20241022 (fast)
26
- - Claude-3: claude-3-haiku-20240307 (legacy but reliable)
27
 
28
- OpenAI models:
29
- - GPT-4o: gpt-4o, gpt-4o-mini (Vision + latest capabilities)
30
- - GPT-4: gpt-4-turbo (Legacy but stable)
31
- - GPT-3.5: gpt-3.5-turbo (Cost-effective)
32
- - Reasoning: o1-preview, o1-mini, o3-mini (Advanced reasoning)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
- Note: Some older models may be deprecated. Always check provider documentation.
35
- """
36
-
37
- # Available models via LiteLLM and native OpenAI
38
- # Updated to include both Anthropic and OpenAI models as of January 2025
39
- SUPPORTED_MODELS = {
40
- # === ANTHROPIC MODELS (via LiteLLM) ===
41
- # Claude-4 models (latest generation - may require special access)
42
- "claude-4-opus": "litellm/anthropic/claude-opus-4-20250514",
43
- "claude-4-sonnet": "litellm/anthropic/claude-sonnet-4-20250514",
44
 
45
- # Claude-3.7 models (newest stable)
46
- "claude-3.7-sonnet": "litellm/anthropic/claude-3-7-sonnet-20250219",
47
 
48
- # Claude-3.5 models (widely available)
49
- "claude-3.5-sonnet-latest": "litellm/anthropic/claude-3-5-sonnet-20241022", # Latest version
50
- "claude-3.5-sonnet": "litellm/anthropic/claude-3-5-sonnet-20240620", # Previous stable version
51
- "claude-3.5-haiku": "litellm/anthropic/claude-3-5-haiku-20241022", # New Haiku 3.5 model
52
-
53
- # Claude-3 models (legacy but still available)
54
- "claude-3-haiku": "litellm/anthropic/claude-3-haiku-20240307",
55
-
56
- # === OPENAI MODELS (native) ===
57
- # GPT-4o models (latest generation with vision)
58
- "gpt-4o": "gpt-4o", # Latest GPT-4o model
59
- "gpt-4o-mini": "gpt-4o-mini", # Compact version
60
-
61
- # GPT-4 models (previous generation)
62
- "gpt-4-turbo": "gpt-4-turbo", # Latest GPT-4 Turbo
63
- "gpt-4": "gpt-4", # Original GPT-4
64
-
65
- # GPT-3.5 models (cost-effective)
66
- "gpt-3.5-turbo": "gpt-3.5-turbo", # Latest 3.5 turbo
67
-
68
- # Reasoning models (o-series)
69
- "o1-preview": "o1-preview", # Advanced reasoning
70
- "o1-mini": "o1-mini", # Compact reasoning
71
- "o3-mini": "o3-mini", # Latest reasoning model
72
- }
73
-
74
- def __init__(self, model_name: Optional[str] = None):
75
- """
76
- Initialize the Fitness Agent with configurable AI model (Anthropic or OpenAI).
77
 
78
- Args:
79
- model_name: Name of the AI model to use. Can be a key from SUPPORTED_MODELS
80
- or a full model identifier. Defaults to claude-3.5-haiku if not specified.
81
- Can also be set via AI_MODEL, ANTHROPIC_MODEL, or OPENAI_MODEL environment variables.
82
- """
83
- # Determine which model to use
84
- if model_name is None:
85
- # Check environment variables in priority order
86
- model_name = (
87
- os.getenv("AI_MODEL") or
88
- os.getenv("ANTHROPIC_MODEL") or
89
- os.getenv("OPENAI_MODEL") or
90
- "claude-3.5-haiku" # Default fallback
91
- )
92
-
93
- # Validate the model name
94
- is_valid, validation_message = self.validate_model_name(model_name)
95
- if not is_valid:
96
- print(f"Warning: {validation_message}")
97
- print(f"Falling back to default model: claude-3.5-haiku")
98
- model_name = "claude-3.5-haiku"
99
-
100
- # Resolve model name to full identifier
101
- if model_name in self.SUPPORTED_MODELS:
102
- full_model_name = self.SUPPORTED_MODELS[model_name]
103
- else:
104
- # Assume it's already a full model identifier
105
- full_model_name = model_name
106
-
107
- # Determine if this is an OpenAI model or needs LiteLLM prefix
108
- if self._is_openai_model(model_name, full_model_name):
109
- # Use native OpenAI model (no prefix needed)
110
- final_model = full_model_name
111
- else:
112
- # For Anthropic models, use the full model name as-is since it already has litellm/ prefix
113
- final_model = full_model_name
114
-
115
- # Store the model information for debugging
116
- self.model_name = model_name
117
- self.full_model_name = full_model_name
118
- self.final_model = final_model
119
- self.provider = self._get_provider(model_name, full_model_name)
120
-
121
- fitness_plan_agent = Agent(
122
- name="Fitness Plan Assistant",
123
- instructions="You are a helpful assistant for creating personalized fitness plans.",
124
- model=final_model,
125
- output_type=FitnessPlan
126
- )
127
-
128
- super().__init__(
129
- name="Fitness Assistant",
130
- model=final_model,
131
- instructions="""
132
- You are a helpful assistant for fitness-related queries.
133
-
134
- If the user wants to create a fitness plan, hand them off to the Fitness Plan Assistant.
135
- """,
136
- handoffs=[fitness_plan_agent]
137
- )
138
-
139
- def _is_openai_model(self, model_name: str, full_model_name: str) -> bool:
140
- """Check if this is an OpenAI model that should use native API."""
141
- # Check direct model name matches
142
- openai_models = ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo", "o1-preview", "o1-mini", "o3-mini"]
143
- if model_name in openai_models:
144
- return True
145
-
146
- # Check for OpenAI indicators in model names
147
- openai_prefixes = ["gpt-", "o1-", "o3-", "openai/", "litellm/openai/"]
148
- for prefix in openai_prefixes:
149
- if prefix in model_name.lower() or prefix in full_model_name.lower():
150
- return True
151
-
152
- return False
153
-
154
- def _get_provider(self, model_name: str, full_model_name: str) -> str:
155
- """Determine the provider based on the model."""
156
- if self._is_openai_model(model_name, full_model_name):
157
- return "openai"
158
- elif "claude" in model_name.lower() or "anthropic" in full_model_name.lower():
159
- return "anthropic"
160
- else:
161
- return "unknown"
162
-
163
- @classmethod
164
- def list_supported_models(cls) -> dict:
165
- """Return a dictionary of supported model names and their full identifiers."""
166
- return cls.SUPPORTED_MODELS.copy()
167
-
168
- @classmethod
169
- def get_model_info(cls, model_name: str) -> str:
170
- """Get information about a specific model."""
171
- model_info = {
172
- # === ANTHROPIC MODELS ===
173
- "claude-4-opus": "Most capable and intelligent model. Superior reasoning, complex tasks (Premium tier)",
174
- "claude-4-sonnet": "High-performance model with exceptional reasoning and efficiency (Premium tier)",
175
- "claude-3.7-sonnet": "Enhanced model with extended thinking capabilities (Recommended)",
176
- "claude-3.5-sonnet-latest": "Latest Claude 3.5 Sonnet with improved capabilities (Recommended)",
177
- "claude-3.5-sonnet": "Excellent balance of intelligence and speed (Stable version)",
178
- "claude-3.5-haiku": "Fast and compact model for near-instant responsiveness (New!)",
179
- "claude-3-haiku": "Fastest model, good for simple tasks and cost-effective (Legacy but reliable)",
180
-
181
- # === OPENAI MODELS ===
182
- "gpt-4o": "Latest GPT-4o with vision, web browsing, and advanced capabilities (Recommended)",
183
- "gpt-4o-mini": "Compact GPT-4o model - fast, capable, and cost-effective (Recommended)",
184
- "gpt-4-turbo": "GPT-4 Turbo with large context window and improved efficiency",
185
- "gpt-4": "Original GPT-4 model - highly capable but slower than turbo variants",
186
- "gpt-3.5-turbo": "Fast and cost-effective model, good for straightforward tasks",
187
- "o1-preview": "Advanced reasoning model with enhanced problem-solving (Preview)",
188
- "o1-mini": "Compact reasoning model for faster inference with good capabilities",
189
- "o3-mini": "Latest reasoning model with improved performance (New!)",
190
- }
191
- return model_info.get(model_name, "Model information not available")
192
-
193
- @classmethod
194
- def get_recommended_models(cls) -> list:
195
- """Get a list of recommended models that are most likely to be available."""
196
- return [
197
- # Anthropic recommendations (most reliable first)
198
- "claude-3.5-haiku", # New fast model, default
199
- "claude-3-haiku", # Most reliable, widely available, cost-effective
200
- "claude-3.5-sonnet", # Stable version, widely available
201
- "claude-3.5-sonnet-latest", # Latest improvements
202
- "claude-3.7-sonnet", # Newest stable with extended thinking
203
-
204
- # OpenAI recommendations
205
- "gpt-4o-mini", # Best balance of capability and cost
206
- "gpt-4o", # Latest flagship model
207
- "gpt-3.5-turbo", # Most cost-effective OpenAI model
208
- "gpt-4-turbo", # Solid previous generation
209
- "o1-mini", # Good reasoning capabilities
210
- ]
211
-
212
- @classmethod
213
- def get_models_by_provider(cls) -> dict:
214
- """Get models organized by provider."""
215
- models = cls.list_supported_models()
216
- providers = {
217
- "anthropic": {},
218
- "openai": {},
219
- "unknown": {}
220
- }
221
-
222
- for name, full_name in models.items():
223
- if "claude" in name.lower() or "anthropic" in full_name.lower():
224
- providers["anthropic"][name] = full_name
225
- elif any(indicator in name.lower() for indicator in ["gpt-", "o1-", "o3-", "openai/"]):
226
- providers["openai"][name] = full_name
227
- else:
228
- providers["unknown"][name] = full_name
229
-
230
- return providers
231
-
232
- @classmethod
233
- def get_models_table_data(cls) -> list:
234
- """Get model data formatted for table display."""
235
- models = cls.list_supported_models()
236
- table_data = []
237
-
238
- # Define capability ratings
239
- capability_ratings = {
240
- # Anthropic models
241
- "claude-4-opus": "★★★★★",
242
- "claude-4-sonnet": "★★★★☆",
243
- "claude-3.7-sonnet": "★★★★☆",
244
- "claude-3.5-sonnet-latest": "★★★★☆",
245
- "claude-3.5-sonnet": "★★★★☆",
246
- "claude-3.5-haiku": "★★★☆☆",
247
- "claude-3-haiku": "★★★☆☆",
248
- # OpenAI models
249
- "gpt-4o": "★★★★★",
250
- "gpt-4o-mini": "★★★★☆",
251
- "gpt-4-turbo": "★★★★☆",
252
- "gpt-4": "★★★★☆",
253
- "gpt-3.5-turbo": "★★★☆☆",
254
- "o1-preview": "★★★★★",
255
- "o1-mini": "★★★★☆",
256
- "o3-mini": "★★★★☆",
257
- }
258
-
259
- # Define speed ratings
260
- speed_ratings = {
261
- # Anthropic models
262
- "claude-4-opus": "★★★☆☆",
263
- "claude-4-sonnet": "★★★★☆",
264
- "claude-3.7-sonnet": "★★★★☆",
265
- "claude-3.5-sonnet-latest": "★★★★☆",
266
- "claude-3.5-sonnet": "★★★★☆",
267
- "claude-3.5-haiku": "★★★★★",
268
- "claude-3-haiku": "★★★★★",
269
- # OpenAI models
270
- "gpt-4o": "★★★★☆",
271
- "gpt-4o-mini": "★★★★★",
272
- "gpt-4-turbo": "★★★★☆",
273
- "gpt-4": "★★★☆☆",
274
- "gpt-3.5-turbo": "★★★★★",
275
- "o1-preview": "★★☆☆☆",
276
- "o1-mini": "★★★☆☆",
277
- "o3-mini": "★★★★☆",
278
- }
279
-
280
- # Define cost ratings (more stars = more expensive)
281
- cost_ratings = {
282
- # Anthropic models
283
- "claude-4-opus": "★★★★★",
284
- "claude-4-sonnet": "★★★★☆",
285
- "claude-3.7-sonnet": "★★★☆☆",
286
- "claude-3.5-sonnet-latest": "★★★☆☆",
287
- "claude-3.5-sonnet": "★★★☆☆",
288
- "claude-3.5-haiku": "★★☆☆☆",
289
- "claude-3-haiku": "★☆☆☆☆",
290
- # OpenAI models
291
- "gpt-4o": "★★★★☆",
292
- "gpt-4o-mini": "★★☆☆☆",
293
- "gpt-4-turbo": "★★★★☆",
294
- "gpt-4": "★★★★☆",
295
- "gpt-3.5-turbo": "★☆☆☆☆",
296
- "o1-preview": "★★★★★",
297
- "o1-mini": "★★★☆☆",
298
- "o3-mini": "★★★☆☆",
299
- }
300
-
301
- recommended = cls.get_recommended_models()
302
-
303
- for model_name, full_path in models.items():
304
- provider = "🔵 Anthropic" if "claude" in model_name.lower() else "🟢 OpenAI"
305
- is_recommended = "⭐" if model_name in recommended else ""
306
-
307
- table_data.append([
308
- is_recommended,
309
- provider,
310
- model_name,
311
- capability_ratings.get(model_name, "★★★☆☆"),
312
- speed_ratings.get(model_name, "★★★☆☆"),
313
- cost_ratings.get(model_name, "★★★☆☆"),
314
- cls.get_model_info(model_name)
315
- ])
316
-
317
- return table_data
318
-
319
- @classmethod
320
- def validate_model_name(cls, model_name: str) -> tuple[bool, str]:
321
- """
322
- Validate if a model name is in our supported list and provide helpful feedback.
323
-
324
- Returns:
325
- tuple: (is_valid, message)
326
- """
327
- if model_name in cls.SUPPORTED_MODELS:
328
- full_name = cls.SUPPORTED_MODELS[model_name]
329
- return True, f"Valid model: {model_name} -> {full_name}"
330
- elif model_name in cls.SUPPORTED_MODELS.values():
331
- return True, f"Valid full model identifier: {model_name}"
332
- else:
333
- recommended = ", ".join(cls.get_recommended_models())
334
- return False, f"Model '{model_name}' not found. Recommended models: {recommended}"
335
-
336
-
337
- if __name__ == "__main__":
338
- # Example usage with different models
339
- print("🤖 Available AI Models (Anthropic + OpenAI):")
340
- print("=" * 60)
341
-
342
- # Show models by provider
343
- providers = FitnessAgent.get_models_by_provider()
344
-
345
- print("🔵 ANTHROPIC MODELS:")
346
- for name, full_id in providers["anthropic"].items():
347
- print(f" • {name}: {full_id}")
348
- print(f" {FitnessAgent.get_model_info(name)}")
349
- print()
350
-
351
- print("🟢 OPENAI MODELS:")
352
- for name, full_id in providers["openai"].items():
353
- print(f" • {name}: {full_id}")
354
- print(f" {FitnessAgent.get_model_info(name)}")
355
- print()
356
-
357
- print("🎯 RECOMMENDED MODELS (most likely to work):")
358
- recommended = FitnessAgent.get_recommended_models()
359
- for model in recommended:
360
- provider_icon = "🔵" if "claude" in model else "🟢" if any(x in model for x in ["gpt", "o1", "o3"]) else "⚪"
361
- print(f" {provider_icon} {model}")
362
-
363
- print("\n" + "="*60 + "\n")
364
-
365
- # Create agent with default model
366
- print("Creating agent with default model (claude-3.5-haiku)...")
367
- agent = FitnessAgent()
368
- print(f"✅ Created agent:")
369
- print(f" Model name: {agent.model_name}")
370
- print(f" Provider: {agent.provider}")
371
- print(f" Final model: {agent.final_model}")
372
-
373
- print("\n" + "="*60 + "\n")
374
-
375
- # Example with OpenAI model
376
- print("Creating agent with OpenAI model (gpt-4o-mini)...")
377
- try:
378
- openai_agent = FitnessAgent("gpt-4o-mini")
379
- print(f"✅ Created OpenAI agent:")
380
- print(f" Model name: {openai_agent.model_name}")
381
- print(f" Provider: {openai_agent.provider}")
382
- print(f" Final model: {openai_agent.final_model}")
383
- except Exception as e:
384
- print(f"⚠️ Could not create OpenAI agent: {e}")
385
- print(" (This is normal if you don't have OPENAI_API_KEY set)")
386
-
387
- print("\n💡 To actually run the agents:")
388
- print(" - Set ANTHROPIC_API_KEY for Claude models")
389
- print(" - Set OPENAI_API_KEY for GPT models")
390
- print(" - Use Runner.run_sync(agent, 'your message') to chat")
391
-
392
- # Uncomment this to test with actual API call:
393
- # result = Runner.run_sync(agent, "Hello. Please make me a fitness plan.")
394
- # print(result.final_output)
 
1
+ """
2
+ Fitness Agent - Main application entry point for Hugging Face Spaces
3
+ """
4
+ import sys
5
  import os
6
+ from pathlib import Path
7
+
8
+ # Add the necessary paths
9
+ current_dir = Path(__file__).parent
10
+ root_dir = current_dir.parent
11
+
12
+ # Add shared library to path
13
+ shared_path = root_dir / "shared" / "src"
14
+ if str(shared_path) not in sys.path:
15
+ sys.path.insert(0, str(shared_path))
16
+
17
+ # Add gradio app to path
18
+ gradio_app_path = root_dir / "apps" / "gradio-app" / "src"
19
+ if str(gradio_app_path) not in sys.path:
20
+ sys.path.insert(0, str(gradio_app_path))
21
+
22
+ try:
23
+ from fitness_core import setup_logging, Config, get_logger
24
+ from fitness_core.agents.base import BaseAgent
25
+ from fitness_core.agents.models import AgentConfig
26
+ from fitness_core.services.agent_runner import AgentRunner
27
 
28
+ # Configure logging
29
+ setup_logging(level=Config.LOG_LEVEL, log_file=Config.LOG_FILE)
30
+ logger = get_logger(__name__)
 
 
31
 
32
+ class FitnessAgent(BaseAgent):
33
+ """Main Fitness Agent class for backwards compatibility."""
34
+
35
+ def __init__(self, config: AgentConfig = None):
36
+ """Initialize the fitness agent."""
37
+ if config is None:
38
+ # Create default config
39
+ config = AgentConfig(
40
+ name="fitness_agent",
41
+ description="AI-powered fitness and nutrition assistant",
42
+ model_provider="openai",
43
+ model_name="gpt-4",
44
+ temperature=0.7
45
+ )
46
+ super().__init__(config)
47
+ self.runner = AgentRunner()
48
+
49
+ async def process_message(self, message: str, context: dict = None) -> str:
50
+ """Process a user message and return response."""
51
+ try:
52
+ response = await self.runner.run_agent(
53
+ message=message,
54
+ context=context or {}
55
+ )
56
+ return response
57
+ except Exception as e:
58
+ logger.error(f"Error processing message: {e}")
59
+ return f"I apologize, but I encountered an error: {str(e)}"
60
+
61
+ def get_capabilities(self) -> list:
62
+ """Get agent capabilities."""
63
+ return [
64
+ "Fitness program design",
65
+ "Nutrition advice",
66
+ "Workout planning",
67
+ "Health and wellness guidance"
68
+ ]
69
+
70
+ except ImportError as e:
71
+ logger = None
72
+ print(f"Warning: Could not import fitness_core modules: {e}")
73
 
74
+ # Fallback FitnessAgent for basic functionality
75
+ class FitnessAgent:
76
+ """Fallback Fitness Agent when core modules aren't available."""
 
 
 
 
 
 
 
77
 
78
+ def __init__(self, config=None):
79
+ self.config = config
80
 
81
+ async def process_message(self, message: str, context: dict = None) -> str:
82
+ """Basic message processing fallback."""
83
+ return "I'm a fitness AI assistant. Please ensure all dependencies are properly installed."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
+ def get_capabilities(self) -> list:
86
+ """Get basic capabilities."""
87
+ return ["Basic fitness assistance"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,4 +1,10 @@
1
- gradio==5.38.1
2
- openai-agents[litellm]==0.2.3
3
- python-dotenv==1.1.1
4
- reportlab==4.4.3
 
 
 
 
 
 
 
1
+ # Core dependencies for Fitness AI Assistant
2
+ gradio>=5.38.1
3
+ openai-agents[litellm]>=0.2.3
4
+ python-dotenv>=1.1.1
5
+ pydantic>=2.0.0
6
+ reportlab>=4.4.3
7
+
8
+ # Additional dependencies that may be needed
9
+ fastapi
10
+ aiofiles
shared/README.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Fitness Core Library
2
+
3
+ Shared business logic, AI agents, and utilities for the Fitness App ecosystem.
4
+
5
+ ## Components
6
+
7
+ - **agents/**: AI agent implementations and model management
8
+ - **models/**: Pydantic data models
9
+ - **services/**: Core business logic and conversation management
10
+ - **utils/**: Configuration, logging, and utility functions
11
+
12
+ ## Usage
13
+
14
+ This library is designed to be imported by various frontend applications (Gradio, FastAPI, CLI, etc.)
15
+
16
+ ```python
17
+ from fitness_core.agents import FitnessAgent
18
+ from fitness_core.models import FitnessPlan
19
+ from fitness_core.services import ConversationManager
20
+ ```
shared/requirements.txt ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiohappyeyeballs==2.6.1 ; python_version >= "3.12" and python_version < "4.0"
2
+ aiohttp==3.12.14 ; python_version >= "3.12" and python_version < "4.0"
3
+ aiosignal==1.4.0 ; python_version >= "3.12" and python_version < "4.0"
4
+ annotated-types==0.7.0 ; python_version >= "3.12" and python_version < "4.0"
5
+ anyio==4.9.0 ; python_version >= "3.12" and python_version < "4.0"
6
+ attrs==25.3.0 ; python_version >= "3.12" and python_version < "4.0"
7
+ certifi==2025.7.14 ; python_version >= "3.12" and python_version < "4.0"
8
+ charset-normalizer==3.4.2 ; python_version >= "3.12" and python_version < "4.0"
9
+ click==8.2.1 ; python_version >= "3.12" and python_version < "4.0"
10
+ colorama==0.4.6 ; python_version >= "3.12" and python_version < "4.0"
11
+ distro==1.9.0 ; python_version >= "3.12" and python_version < "4.0"
12
+ filelock==3.18.0 ; python_version >= "3.12" and python_version < "4.0"
13
+ frozenlist==1.7.0 ; python_version >= "3.12" and python_version < "4.0"
14
+ fsspec==2025.7.0 ; python_version >= "3.12" and python_version < "4.0"
15
+ griffe==1.8.0 ; python_version >= "3.12" and python_version < "4.0"
16
+ h11==0.16.0 ; python_version >= "3.12" and python_version < "4.0"
17
+ hf-xet==1.1.5 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "arm64" or platform_machine == "aarch64")
18
+ httpcore==1.0.9 ; python_version >= "3.12" and python_version < "4.0"
19
+ httpx-sse==0.4.1 ; python_version >= "3.12" and python_version < "4.0"
20
+ httpx==0.28.1 ; python_version >= "3.12" and python_version < "4.0"
21
+ huggingface-hub==0.34.1 ; python_version >= "3.12" and python_version < "4.0"
22
+ idna==3.10 ; python_version >= "3.12" and python_version < "4.0"
23
+ importlib-metadata==8.7.0 ; python_version >= "3.12" and python_version < "4.0"
24
+ jinja2==3.1.6 ; python_version >= "3.12" and python_version < "4.0"
25
+ jiter==0.10.0 ; python_version >= "3.12" and python_version < "4.0"
26
+ jsonschema-specifications==2025.4.1 ; python_version >= "3.12" and python_version < "4.0"
27
+ jsonschema==4.25.0 ; python_version >= "3.12" and python_version < "4.0"
28
+ litellm==1.74.8 ; python_version >= "3.12" and python_version < "4.0"
29
+ markupsafe==3.0.2 ; python_version >= "3.12" and python_version < "4.0"
30
+ mcp==1.12.2 ; python_version >= "3.12" and python_version < "4.0"
31
+ multidict==6.6.3 ; python_version >= "3.12" and python_version < "4.0"
32
+ openai-agents[litellm]==0.2.3 ; python_version >= "3.12" and python_version < "4.0"
33
+ openai==1.97.1 ; python_version >= "3.12" and python_version < "4.0"
34
+ packaging==25.0 ; python_version >= "3.12" and python_version < "4.0"
35
+ propcache==0.3.2 ; python_version >= "3.12" and python_version < "4.0"
36
+ pydantic-core==2.33.2 ; python_version >= "3.12" and python_version < "4.0"
37
+ pydantic-settings==2.10.1 ; python_version >= "3.12" and python_version < "4.0"
38
+ pydantic==2.11.7 ; python_version >= "3.12" and python_version < "4.0"
39
+ python-dotenv==1.1.1 ; python_version >= "3.12" and python_version < "4.0"
40
+ python-multipart==0.0.20 ; python_version >= "3.12" and python_version < "4.0"
41
+ pywin32==311 ; python_version >= "3.12" and python_version < "4.0" and sys_platform == "win32"
42
+ pyyaml==6.0.2 ; python_version >= "3.12" and python_version < "4.0"
43
+ referencing==0.36.2 ; python_version >= "3.12" and python_version < "4.0"
44
+ regex==2024.11.6 ; python_version >= "3.12" and python_version < "4.0"
45
+ requests==2.32.4 ; python_version >= "3.12" and python_version < "4.0"
46
+ rpds-py==0.26.0 ; python_version >= "3.12" and python_version < "4.0"
47
+ sniffio==1.3.1 ; python_version >= "3.12" and python_version < "4.0"
48
+ sse-starlette==3.0.2 ; python_version >= "3.12" and python_version < "4.0"
49
+ starlette==0.47.2 ; python_version >= "3.12" and python_version < "4.0"
50
+ tiktoken==0.9.0 ; python_version >= "3.12" and python_version < "4.0"
51
+ tokenizers==0.21.2 ; python_version >= "3.12" and python_version < "4.0"
52
+ tqdm==4.67.1 ; python_version >= "3.12" and python_version < "4.0"
53
+ types-requests==2.32.4.20250611 ; python_version >= "3.12" and python_version < "4.0"
54
+ typing-extensions==4.14.1 ; python_version >= "3.12" and python_version < "4.0"
55
+ typing-inspection==0.4.1 ; python_version >= "3.12" and python_version < "4.0"
56
+ urllib3==2.5.0 ; python_version >= "3.12" and python_version < "4.0"
57
+ uvicorn==0.35.0 ; python_version >= "3.12" and python_version < "4.0" and sys_platform != "emscripten"
58
+ yarl==1.20.1 ; python_version >= "3.12" and python_version < "4.0"
59
+ zipp==3.23.0 ; python_version >= "3.12" and python_version < "4.0"
shared/src/fitness_core/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Fitness Core - Shared business logic and AI agents.
3
+
4
+ This package contains the core functionality that can be shared across
5
+ different user interfaces (Gradio, FastAPI, CLI, etc.).
6
+ """
7
+
8
+ __version__ = "0.1.0"
9
+
10
+ # Core exports
11
+ from .agents import FitnessAgent, FitnessPlan, ModelProvider
12
+ from .services import ConversationManager, AgentRunner, ResponseFormatter
13
+ from .utils import Config, setup_logging, get_logger
14
+
15
+ __all__ = [
16
+ # Agents
17
+ 'FitnessAgent',
18
+ 'FitnessPlan',
19
+ 'ModelProvider',
20
+
21
+ # Services
22
+ 'ConversationManager',
23
+ 'AgentRunner',
24
+ 'ResponseFormatter',
25
+
26
+ # Utils
27
+ 'Config',
28
+ 'setup_logging',
29
+ 'get_logger'
30
+ ]
shared/src/fitness_core/agents/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Agents module for the fitness core library.
3
+ """
4
+ from .base import FitnessAgent
5
+ from .models import FitnessPlan, AgentResponse, ConversationMessage, AgentConfig
6
+ from .providers import ModelProvider
7
+
8
+ __all__ = [
9
+ 'FitnessAgent',
10
+ 'FitnessPlan',
11
+ 'AgentResponse',
12
+ 'ConversationMessage',
13
+ 'AgentConfig',
14
+ 'ModelProvider'
15
+ ]
shared/src/fitness_core/agents/base.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Main fitness agent implementation.
3
+ """
4
+ from typing import Optional
5
+ from agents import Agent
6
+ from dotenv import load_dotenv
7
+
8
+ from .models import FitnessPlan, AgentConfig
9
+ from .providers import ModelProvider
10
+
11
+ load_dotenv()
12
+
13
+
14
class FitnessAgent(Agent):
    """
    A helpful assistant for general fitness guidance and handoffs to a plan-building agent.

    Supports multiple AI providers via LiteLLM (as of January 2025):

    Anthropic models:
    - Claude-4: claude-opus-4-20250514, claude-sonnet-4-20250514 (Premium)
    - Claude-3.7: claude-3-7-sonnet-20250219 (Extended thinking)
    - Claude-3.5: claude-3-5-sonnet-20241022 (latest), claude-3-5-sonnet-20240620 (stable), claude-3-5-haiku-20241022 (fast)
    - Claude-3: claude-3-haiku-20240307 (legacy but reliable)

    OpenAI models:
    - GPT-4o: gpt-4o, gpt-4o-mini (Vision + latest capabilities)
    - GPT-4: gpt-4-turbo (Legacy but stable)
    - GPT-3.5: gpt-3.5-turbo (Cost-effective)
    - Reasoning: o1-preview, o1-mini, o3-mini (Advanced reasoning)

    Note: Some older models may be deprecated. Always check provider documentation.

    All model metadata (supported names, ratings, validation) is delegated to
    :class:`ModelProvider`; the classmethods below are thin pass-throughs kept
    so UI code only needs to import this class.
    """

    def __init__(self, model_name: Optional[str] = None, config: Optional[AgentConfig] = None):
        """
        Initialize the Fitness Agent with configurable AI model (Anthropic or OpenAI).

        Args:
            model_name: Name of the AI model to use. Can be a key from SUPPORTED_MODELS
                        or a full model identifier. Defaults to gpt-4o-mini if not specified.
                        Can also be set via AI_MODEL, ANTHROPIC_MODEL, or OPENAI_MODEL environment variables.
            config: Optional AgentConfig for additional configuration
        """
        # Resolve model name (env-var fallback + validation lives in ModelProvider)
        resolved_model_name = ModelProvider.resolve_model_name(model_name)
        final_model = ModelProvider.get_final_model_identifier(resolved_model_name)

        # Store the model information for debugging.
        # NOTE(review): these attributes are assigned *before* super().__init__();
        # assumes the agents-SDK Agent base does not reset instance state in its
        # initializer — confirm against the installed `agents` package.
        self.model_name = resolved_model_name
        self.full_model_name = ModelProvider.SUPPORTED_MODELS.get(resolved_model_name, resolved_model_name)
        self.final_model = final_model
        self.provider = ModelProvider.get_provider(resolved_model_name, self.full_model_name)
        self.config = config

        # Create fitness plan agent: a sub-agent constrained to emit a
        # structured FitnessPlan (output_type) rather than free text.
        fitness_plan_agent = Agent(
            name="Fitness Plan Assistant",
            instructions="You are a helpful assistant for creating personalized fitness plans.",
            model=final_model,
            output_type=FitnessPlan
        )

        # Initialize parent Agent; the sub-agent is registered as a handoff
        # target so plan-creation requests are delegated to it.
        super().__init__(
            name="Fitness Assistant",
            model=final_model,
            instructions="""
            You are a helpful assistant for fitness-related queries.
            
            If the user wants to create a fitness plan, hand them off to the Fitness Plan Assistant.
            """,
            handoffs=[fitness_plan_agent]
        )

    @classmethod
    def list_supported_models(cls) -> dict:
        """Return a dictionary of supported model names and their full identifiers."""
        return ModelProvider.SUPPORTED_MODELS.copy()

    @classmethod
    def get_model_info(cls, model_name: str) -> str:
        """Get information about a specific model."""
        return ModelProvider.get_model_info(model_name)

    @classmethod
    def get_recommended_models(cls) -> list:
        """Get a list of recommended models that are most likely to be available."""
        return ModelProvider.get_recommended_models()

    @classmethod
    def get_models_by_provider(cls) -> dict:
        """Get models organized by provider."""
        return ModelProvider.get_models_by_provider()

    @classmethod
    def get_models_table_data(cls) -> list:
        """Get model data formatted for table display."""
        return ModelProvider.get_models_table_data()

    @classmethod
    def validate_model_name(cls, model_name: str) -> tuple[bool, str]:
        """
        Validate if a model name is in our supported list and provide helpful feedback.

        Returns:
            tuple: (is_valid, message)
        """
        return ModelProvider.validate_model_name(model_name)
shared/src/fitness_core/agents/models.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pydantic models for the fitness agent.
3
+ """
4
+ from pydantic import BaseModel
5
+ from typing import Optional, List
6
+
7
+
8
class FitnessPlan(BaseModel):
    """Structured fitness plan model.

    Produced by the plan-building sub-agent (its ``output_type``); the fields
    are free-form text authored by the LLM, not validated schedules.
    """
    # Human-readable title of the plan
    name: str
    # Workout schedule as free text (format decided by the model)
    training_plan: str
    # Nutrition guidance as free text (format decided by the model)
    meal_plan: str


class AgentResponse(BaseModel):
    """Standard agent response format."""
    # The textual reply shown to the user
    content: str
    # Present only when the response carries a structured plan
    plan: Optional[FitnessPlan] = None
    # Free-form extras; keys are not fixed by this module
    metadata: Optional[dict] = None


class ConversationMessage(BaseModel):
    """Individual conversation message."""
    role: str  # "user" or "assistant"
    content: str
    # Optional — no producer in this module sets it; presumably ISO-8601 when used
    timestamp: Optional[str] = None


class AgentConfig(BaseModel):
    """Configuration for the fitness agent."""
    # Key from ModelProvider.SUPPORTED_MODELS or a full model identifier
    model_name: str
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None
    # Extra system-prompt text; NOTE(review): nothing in this chunk consumes it yet
    custom_instructions: Optional[str] = None
shared/src/fitness_core/agents/providers.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ AI model provider management and configuration.
3
+ """
4
+ import os
5
+ from typing import Dict, List, Tuple, Optional
6
+ from .models import AgentConfig
7
+
8
+
9
class ModelProvider:
    """Manages AI model configurations and provider-specific logic.

    Pure-data/classmethod namespace: holds the supported-model catalog and
    helpers to validate names, resolve environment defaults, and map a short
    key to the identifier the agents library expects. Never instantiated.
    """

    # Fallback used whenever no model is requested or validation fails.
    DEFAULT_MODEL = "gpt-4o-mini"

    # Available models via LiteLLM and native OpenAI
    # Updated to include both Anthropic and OpenAI models as of January 2025
    SUPPORTED_MODELS = {
        # === ANTHROPIC MODELS (via LiteLLM) ===
        # Claude-4 models (latest generation - may require special access)
        "claude-4-opus": "litellm/anthropic/claude-opus-4-20250514",
        "claude-4-sonnet": "litellm/anthropic/claude-sonnet-4-20250514",

        # Claude-3.7 models (newest stable)
        "claude-3.7-sonnet": "litellm/anthropic/claude-3-7-sonnet-20250219",

        # Claude-3.5 models (widely available)
        "claude-3.5-sonnet-latest": "litellm/anthropic/claude-3-5-sonnet-20241022",  # Latest version
        "claude-3.5-sonnet": "litellm/anthropic/claude-3-5-sonnet-20240620",  # Previous stable version
        "claude-3.5-haiku": "litellm/anthropic/claude-3-5-haiku-20241022",  # New Haiku 3.5 model

        # Claude-3 models (legacy but still available)
        "claude-3-haiku": "litellm/anthropic/claude-3-haiku-20240307",

        # === OPENAI MODELS (native) ===
        # GPT-4o models (latest generation with vision)
        "gpt-4o": "gpt-4o",  # Latest GPT-4o model
        "gpt-4o-mini": "gpt-4o-mini",  # Compact version

        # GPT-4 models (previous generation)
        "gpt-4-turbo": "gpt-4-turbo",  # Latest GPT-4 Turbo
        "gpt-4": "gpt-4",  # Original GPT-4

        # GPT-3.5 models (cost-effective)
        "gpt-3.5-turbo": "gpt-3.5-turbo",  # Latest 3.5 turbo

        # Reasoning models (o-series)
        "o1-preview": "o1-preview",  # Advanced reasoning
        "o1-mini": "o1-mini",  # Compact reasoning
        "o3-mini": "o3-mini",  # Latest reasoning model
    }

    @classmethod
    def get_model_info(cls, model_name: str) -> str:
        """Return a one-line human-readable description for *model_name*."""
        model_info = {
            # === ANTHROPIC MODELS ===
            "claude-4-opus": "Most capable and intelligent model. Superior reasoning, complex tasks (Premium tier)",
            "claude-4-sonnet": "High-performance model with exceptional reasoning and efficiency (Premium tier)",
            "claude-3.7-sonnet": "Enhanced model with extended thinking capabilities (Recommended)",
            "claude-3.5-sonnet-latest": "Latest Claude 3.5 Sonnet with improved capabilities (Recommended)",
            "claude-3.5-sonnet": "Excellent balance of intelligence and speed (Stable version)",
            "claude-3.5-haiku": "Fast and compact model for near-instant responsiveness (New!)",
            "claude-3-haiku": "Fastest model, good for simple tasks and cost-effective (Legacy but reliable)",

            # === OPENAI MODELS ===
            "gpt-4o": "Latest GPT-4o with vision, web browsing, and advanced capabilities (Recommended)",
            "gpt-4o-mini": "Compact GPT-4o model - fast, capable, and cost-effective (Recommended)",
            "gpt-4-turbo": "GPT-4 Turbo with large context window and improved efficiency",
            "gpt-4": "Original GPT-4 model - highly capable but slower than turbo variants",
            "gpt-3.5-turbo": "Fast and cost-effective model, good for straightforward tasks",
            "o1-preview": "Advanced reasoning model with enhanced problem-solving (Preview)",
            "o1-mini": "Compact reasoning model for faster inference with good capabilities",
            "o3-mini": "Latest reasoning model with improved performance (New!)",
        }
        return model_info.get(model_name, "Model information not available")

    @classmethod
    def get_recommended_models(cls) -> List[str]:
        """Get a list of recommended models that are most likely to be available."""
        return [
            # Anthropic recommendations (most reliable first)
            "claude-3.5-haiku",  # New fast model, default
            "claude-3-haiku",  # Most reliable, widely available, cost-effective
            "claude-3.5-sonnet",  # Stable version, widely available
            "claude-3.5-sonnet-latest",  # Latest improvements
            "claude-3.7-sonnet",  # Newest stable with extended thinking

            # OpenAI recommendations
            "gpt-4o-mini",  # Best balance of capability and cost
            "gpt-4o",  # Latest flagship model
            "gpt-3.5-turbo",  # Most cost-effective OpenAI model
            "gpt-4-turbo",  # Solid previous generation
            "o1-mini",  # Good reasoning capabilities
        ]

    @classmethod
    def get_models_by_provider(cls) -> Dict[str, Dict[str, str]]:
        """Get models organized by provider (anthropic / openai / unknown buckets)."""
        models = cls.SUPPORTED_MODELS
        providers = {
            "anthropic": {},
            "openai": {},
            "unknown": {}
        }

        for name, full_name in models.items():
            if "claude" in name.lower() or "anthropic" in full_name.lower():
                providers["anthropic"][name] = full_name
            elif any(indicator in name.lower() for indicator in ["gpt-", "o1-", "o3-", "openai/"]):
                providers["openai"][name] = full_name
            else:
                providers["unknown"][name] = full_name

        return providers

    @classmethod
    def get_models_table_data(cls) -> List[List[str]]:
        """Get model data formatted for table display.

        Each row: [recommended-marker, provider-label, key, capability, speed,
        cost, description]. Star ratings are hand-maintained editorial data.
        """
        models = cls.SUPPORTED_MODELS
        table_data = []

        # Define capability ratings
        capability_ratings = {
            # Anthropic models
            "claude-4-opus": "★★★★★",
            "claude-4-sonnet": "★★★★☆",
            "claude-3.7-sonnet": "★★★★☆",
            "claude-3.5-sonnet-latest": "★★★★☆",
            "claude-3.5-sonnet": "★★★★☆",
            "claude-3.5-haiku": "★★★☆☆",
            "claude-3-haiku": "★★★☆☆",
            # OpenAI models
            "gpt-4o": "★★★★★",
            "gpt-4o-mini": "★★★★☆",
            "gpt-4-turbo": "★★★★☆",
            "gpt-4": "★★★★☆",
            "gpt-3.5-turbo": "★★★☆☆",
            "o1-preview": "★★★★★",
            "o1-mini": "★★★★☆",
            "o3-mini": "★★★★☆",
        }

        # Define speed ratings
        speed_ratings = {
            # Anthropic models
            "claude-4-opus": "★★★☆☆",
            "claude-4-sonnet": "★★★★☆",
            "claude-3.7-sonnet": "★★★★☆",
            "claude-3.5-sonnet-latest": "★★★★☆",
            "claude-3.5-sonnet": "★★★★☆",
            "claude-3.5-haiku": "★★★★★",
            "claude-3-haiku": "★★★★★",
            # OpenAI models
            "gpt-4o": "★★★★☆",
            "gpt-4o-mini": "★★★★★",
            "gpt-4-turbo": "★★★★☆",
            "gpt-4": "★★★☆☆",
            "gpt-3.5-turbo": "★★★★★",
            "o1-preview": "★★☆☆☆",
            "o1-mini": "★★★☆☆",
            "o3-mini": "★★★★☆",
        }

        # Define cost ratings (more stars = more expensive)
        cost_ratings = {
            # Anthropic models
            "claude-4-opus": "★★★★★",
            "claude-4-sonnet": "★★★★☆",
            "claude-3.7-sonnet": "★★★☆☆",
            "claude-3.5-sonnet-latest": "★★★☆☆",
            "claude-3.5-sonnet": "★★★☆☆",
            "claude-3.5-haiku": "★★☆☆☆",
            "claude-3-haiku": "★☆☆☆☆",
            # OpenAI models
            "gpt-4o": "★★★★☆",
            "gpt-4o-mini": "★★☆☆☆",
            "gpt-4-turbo": "★★★★☆",
            "gpt-4": "★★★★☆",
            "gpt-3.5-turbo": "★☆☆☆☆",
            "o1-preview": "★★★★★",
            "o1-mini": "★★★☆☆",
            "o3-mini": "★★★☆☆",
        }

        recommended = cls.get_recommended_models()

        for model_name, full_path in models.items():
            # Reuse get_provider so the label logic cannot drift from the
            # canonical provider detection (was a separate "claude" check).
            provider_key = cls.get_provider(model_name, full_path)
            provider = "🔵 Anthropic" if provider_key == "anthropic" else "🟢 OpenAI"
            is_recommended = "⭐" if model_name in recommended else ""

            table_data.append([
                is_recommended,
                provider,
                model_name,
                capability_ratings.get(model_name, "★★★☆☆"),
                speed_ratings.get(model_name, "★★★☆☆"),
                cost_ratings.get(model_name, "★★★☆☆"),
                cls.get_model_info(model_name)
            ])

        return table_data

    @classmethod
    def validate_model_name(cls, model_name: str) -> Tuple[bool, str]:
        """
        Validate if a model name is in our supported list and provide helpful feedback.

        Accepts either a short key ("gpt-4o") or a full identifier
        ("litellm/anthropic/...").

        Returns:
            tuple: (is_valid, message)
        """
        if model_name in cls.SUPPORTED_MODELS:
            full_name = cls.SUPPORTED_MODELS[model_name]
            return True, f"Valid model: {model_name} -> {full_name}"
        elif model_name in cls.SUPPORTED_MODELS.values():
            return True, f"Valid full model identifier: {model_name}"
        else:
            recommended = ", ".join(cls.get_recommended_models())
            return False, f"Model '{model_name}' not found. Recommended models: {recommended}"

    @classmethod
    def is_openai_model(cls, model_name: str, full_model_name: str) -> bool:
        """Check if this is an OpenAI model that should use native API."""
        # Check direct model name matches
        openai_models = ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo", "o1-preview", "o1-mini", "o3-mini"]
        if model_name in openai_models:
            return True

        # Check for OpenAI indicators in model names
        openai_prefixes = ["gpt-", "o1-", "o3-", "openai/", "litellm/openai/"]
        for prefix in openai_prefixes:
            if prefix in model_name.lower() or prefix in full_model_name.lower():
                return True

        return False

    @classmethod
    def get_provider(cls, model_name: str, full_model_name: str) -> str:
        """Determine the provider ("openai", "anthropic" or "unknown") for a model."""
        if cls.is_openai_model(model_name, full_model_name):
            return "openai"
        elif "claude" in model_name.lower() or "anthropic" in full_model_name.lower():
            return "anthropic"
        else:
            return "unknown"

    @classmethod
    def resolve_model_name(cls, model_name: Optional[str] = None) -> str:
        """
        Resolve model name from various sources (env vars, default, etc.).

        Args:
            model_name: Explicit model name, if provided

        Returns:
            Resolved model name (falls back to DEFAULT_MODEL when the
            requested one is unknown)
        """
        # Local import keeps the module's dependency surface unchanged.
        import logging

        if model_name is None:
            # Check environment variables in priority order
            model_name = (
                os.getenv("AI_MODEL") or
                os.getenv("ANTHROPIC_MODEL") or
                os.getenv("OPENAI_MODEL") or
                cls.DEFAULT_MODEL  # Default fallback - reliable OpenAI model
            )

        # Validate the model name; warn via logging (not print) so library
        # consumers control verbosity and destination.
        is_valid, validation_message = cls.validate_model_name(model_name)
        if not is_valid:
            logging.getLogger(__name__).warning(
                "%s — falling back to default model: %s", validation_message, cls.DEFAULT_MODEL
            )
            model_name = cls.DEFAULT_MODEL

        return model_name

    @classmethod
    def get_final_model_identifier(cls, model_name: str) -> str:
        """
        Get the final model identifier to use with the agents library.

        Args:
            model_name: Model name (key or full identifier)

        Returns:
            Final model identifier for the agents library
        """
        # Resolve short key to full identifier; otherwise assume it already is one.
        full_model_name = cls.SUPPORTED_MODELS.get(model_name, model_name)

        # Both providers are already in final form here: OpenAI entries carry
        # no prefix, Anthropic entries already include "litellm/anthropic/".
        # (The previous if/else had identical branches — collapsed.)
        return full_model_name
shared/src/fitness_core/services/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Core services for the fitness core library.
3
+ """
4
+ from .conversation import ConversationManager
5
+ from .agent_runner import AgentRunner
6
+ from .formatters import ResponseFormatter
7
+ from .exceptions import (
8
+ FitnessAppError,
9
+ FitnessUIError,
10
+ AgentExecutionError,
11
+ ModelProviderError,
12
+ ConversationError
13
+ )
14
+
15
+ __all__ = [
16
+ 'ConversationManager',
17
+ 'AgentRunner',
18
+ 'ResponseFormatter',
19
+ 'FitnessAppError',
20
+ 'FitnessUIError',
21
+ 'AgentExecutionError',
22
+ 'ModelProviderError',
23
+ 'ConversationError'
24
+ ]
shared/src/fitness_core/services/agent_runner.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Agent execution and streaming functionality.
3
+ """
4
+ import asyncio
5
+ import logging
6
+ from typing import Union, List, Dict, Any, Generator, AsyncGenerator
7
+ from concurrent.futures import ThreadPoolExecutor
8
+ from agents import Runner
9
+
10
+ from ..agents.base import FitnessAgent
11
+ from .exceptions import AgentExecutionError
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
class AgentRunner:
    """Handles agent execution with streaming and error management.

    A namespace of static helpers around the agents-SDK ``Runner``:
    - ``run_agent_with_streaming_sync``: blocking generator for Gradio handlers
    - ``run_agent_with_streaming``: async generator using ``Runner.run_streamed``
    - ``run_agent_safely_sync``: plain blocking call with error wrapping
    All three return error sentinels instead of raising, so UI callers never
    see an exception.
    """

    @staticmethod
    def run_agent_with_streaming_sync(
        agent: FitnessAgent,
        agent_input: Union[str, List[Dict[str, str]]]
    ) -> Generator[Dict[str, Any], None, None]:
        """
        Run the agent with streaming support in a synchronous context (for Gradio)

        Despite the name, this does not stream token-by-token: it runs the
        agent to completion and yields a single 'final_result' dict (or one
        'error' dict on failure).

        Args:
            agent: The fitness agent instance
            agent_input: Input for the agent (string for first message, list for conversation)

        Yields:
            Streaming response chunks from the agent with content and final result
        """
        try:
            logger.info(f"Running agent with streaming (sync). Input type: {type(agent_input)}")

            # Handle event loop creation for worker threads
            def _run_with_new_loop():
                """Create a new event loop and run the agent"""
                # Create a new event loop for this thread
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    # Now we can use Runner.run_sync which will use this loop
                    return Runner.run_sync(agent, agent_input)
                finally:
                    # Clean up the loop
                    loop.close()
                    asyncio.set_event_loop(None)

            try:
                # Try direct call first (in case we're in main thread)
                final_result = Runner.run_sync(agent, agent_input)
            except RuntimeError as e:
                # String-matching on the message is brittle but is the only
                # signal run_sync gives for "no usable loop in this thread".
                if "no current event loop" in str(e).lower() or "anyio worker thread" in str(e).lower():
                    # We're in a worker thread, create new event loop
                    final_result = _run_with_new_loop()
                else:
                    raise

            # Extract content and yield it
            content = AgentRunner._extract_content_from_result(final_result)

            # Simulate streaming by yielding the result
            yield {
                'type': 'final_result',
                'result': final_result,
                'content': content
            }

        except Exception as e:
            logger.error(f"Agent execution error: {str(e)}")
            # Return error as a final result-like object so downstream code
            # that expects .final_output / .to_input_list() keeps working.
            class ErrorResult:
                def __init__(self, content):
                    self.final_output = content

                def to_input_list(self):
                    return [{"role": "assistant", "content": self.final_output}]

            yield {
                'type': 'error',
                'result': ErrorResult(f"Sorry, I encountered an error while processing your request: {str(e)}"),
                'content': f"Sorry, I encountered an error while processing your request: {str(e)}"
            }

    @staticmethod
    async def run_agent_with_streaming(
        agent: FitnessAgent,
        agent_input: Union[str, List[Dict[str, str]]]
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Run the agent with streaming support using the correct Runner.run_streamed API

        Args:
            agent: The fitness agent instance
            agent_input: Input for the agent (string for first message, list for conversation)

        Yields:
            Streaming response chunks from the agent with content and final result
        """
        try:
            logger.info(f"Running agent with streaming. Input type: {type(agent_input)}")

            # Use the correct streaming API
            # NOTE(review): assumes the object returned by run_streamed is
            # directly async-iterable and yields chunks with .content /
            # .final_output — verify against the agents SDK (stream_events()).
            result = Runner.run_streamed(agent, agent_input)

            accumulated_content = ""
            final_result = None
            has_content = False

            try:
                async for chunk in result:
                    if hasattr(chunk, 'content') and chunk.content:
                        accumulated_content += chunk.content
                        has_content = True
                        yield {
                            'type': 'chunk',
                            'content': chunk.content,
                            'accumulated': accumulated_content
                        }
                    elif hasattr(chunk, 'final_output'):
                        final_result = chunk
                        break

                # If we didn't get content through streaming, try direct execution
                # NOTE(review): this fallback re-runs the agent from scratch
                # (second model call) — confirm that is intended.
                if not has_content:
                    logger.info("No streaming content received, falling back to direct execution")
                    final_result = Runner.run_sync(agent, agent_input)
                    accumulated_content = AgentRunner._extract_content_from_result(final_result)

            except Exception as streaming_error:
                logger.warning(f"Streaming failed: {streaming_error}, falling back to sync execution")
                final_result = Runner.run_sync(agent, agent_input)
                accumulated_content = AgentRunner._extract_content_from_result(final_result)

            # Get the final result if we haven't already from fallback
            if final_result is None:
                final_result = Runner.run_sync(agent, agent_input)

            # Yield the final result for conversation management
            yield {
                'type': 'final_result',
                'result': final_result,
                'content': accumulated_content
            }

        except Exception as e:
            logger.error(f"Agent streaming error: {str(e)}")
            # Return error as a final result-like object (same shape as the
            # sync path's sentinel).
            class ErrorResult:
                def __init__(self, content):
                    self.final_output = content

                def to_input_list(self):
                    return [{"role": "assistant", "content": self.final_output}]

            yield {
                'type': 'error',
                'result': ErrorResult(f"Sorry, I encountered an error while processing your request: {str(e)}"),
                'content': f"Sorry, I encountered an error while processing your request: {str(e)}"
            }

    @staticmethod
    def run_agent_safely_sync(
        agent: FitnessAgent,
        agent_input: Union[str, List[Dict[str, str]]]
    ) -> Any:
        """
        Synchronous wrapper for the agent execution - with event loop handling

        Args:
            agent: The fitness agent instance
            agent_input: Input for the agent (string for first message, list for conversation)

        Returns:
            Final agent result (or an ErrorResult sentinel — never raises)
        """
        try:
            # Handle event loop creation for worker threads
            def _run_with_new_loop():
                """Create a new event loop and run the agent"""
                # Create a new event loop for this thread
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    # Now we can use Runner.run_sync which will use this loop
                    return Runner.run_sync(agent, agent_input)
                finally:
                    # Clean up the loop
                    loop.close()
                    asyncio.set_event_loop(None)

            try:
                # Try direct call first (in case we're in main thread)
                return Runner.run_sync(agent, agent_input)
            except RuntimeError as e:
                if "no current event loop" in str(e).lower() or "anyio worker thread" in str(e).lower():
                    # We're in a worker thread, create new event loop
                    return _run_with_new_loop()
                else:
                    raise

        except Exception as e:
            logger.error(f"Agent execution error: {str(e)}")

            # Create a mock result object for error cases
            class ErrorResult:
                def __init__(self, error_message):
                    self.final_output = error_message

                def to_input_list(self):
                    return [{"role": "assistant", "content": self.final_output}]

            return ErrorResult(f"Sorry, I encountered an error while processing your request: {str(e)}")

    @staticmethod
    def _extract_content_from_result(result: Any) -> str:
        """
        Extract content from agent response with proper error handling

        Args:
            result: Agent response object

        Returns:
            Formatted response string
        """
        try:
            if hasattr(result, 'final_output'):
                content = result.final_output

                # Check if this looks like a fitness plan (duck-typed on the
                # FitnessPlan field names) and pretty-print it if so.
                if hasattr(content, 'name') and hasattr(content, 'training_plan'):
                    # Local import avoids a circular import at module load time.
                    from .formatters import ResponseFormatter
                    return ResponseFormatter.format_fitness_plan(content)
                else:
                    return str(content)
            elif hasattr(result, 'content'):
                return str(result.content)
            elif isinstance(result, str):
                return result
            else:
                return str(result)
        except Exception as e:
            logger.error(f"Error extracting content from result: {str(e)}")
            return f"Sorry, I encountered an error while formatting the response: {str(e)}"
shared/src/fitness_core/services/conversation.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Conversation management for the fitness agent.
3
+ """
4
+ from typing import List, Dict, Union, Any
5
+ from ..agents.models import ConversationMessage
6
+ from .exceptions import ConversationError
7
+
8
+
9
class ConversationManager:
    """Manages conversation history and state for the fitness agent"""

    def __init__(self):
        # Running transcript: list of {"role": ..., "content": ...} dicts.
        self.conversation_history: List[Dict[str, str]] = []
        self.thread_id = "fitness_thread_001"  # Could be made dynamic per session

    def add_user_message(self, content: str) -> None:
        """Append a stripped user turn; blank input is rejected."""
        self._append_turn("user", content, "Cannot add empty message to conversation")

    def add_assistant_message(self, content: str) -> None:
        """Append a stripped assistant turn; blank input is rejected."""
        self._append_turn("assistant", content, "Cannot add empty assistant message to conversation")

    def _append_turn(self, role: str, content: str, error_message: str) -> None:
        """Shared guard + append used by both public add_* methods."""
        stripped = content.strip() if content else ""
        if not stripped:
            raise ConversationError(error_message)
        self.conversation_history.append({"role": role, "content": stripped})

    def get_input_for_agent(self) -> Union[str, List[Dict[str, str]]]:
        """Return what the agent expects: a greeting when empty, the bare
        content for a single first message, else the full history list."""
        history = self.conversation_history
        if not history:
            return "Hello"
        if len(history) == 1:
            return history[0]["content"]
        return history

    def update_from_result(self, result: Any) -> None:
        """Synchronise the local transcript with an agent result object."""
        try:
            if hasattr(result, 'to_input_list'):
                # The agent's result carries the authoritative transcript;
                # adopt it wholesale.
                self.conversation_history = result.to_input_list()
            else:
                reply = self._extract_content_from_result(result)
                if reply:
                    self.add_assistant_message(reply)
        except Exception as e:
            raise ConversationError(f"Failed to update conversation from result: {str(e)}")

    def _extract_content_from_result(self, result: Any) -> str:
        """Pull printable text out of the various result shapes."""
        for attr in ("final_output", "content"):
            if hasattr(result, attr):
                return str(getattr(result, attr))
        return result if isinstance(result, str) else str(result)

    def clear_history(self) -> None:
        """Forget every stored message."""
        self.conversation_history = []

    def get_history_summary(self) -> str:
        """Get a summary of the conversation for debugging"""
        return f"Conversation has {len(self.conversation_history)} messages"

    def get_last_user_message(self) -> str:
        """Most recent user turn, or '' when none exists."""
        return self._latest_content_for("user")

    def get_last_assistant_message(self) -> str:
        """Most recent assistant turn, or '' when none exists."""
        return self._latest_content_for("assistant")

    def _latest_content_for(self, role: str) -> str:
        """Scan backwards for the newest message carrying *role*."""
        return next(
            (msg["content"] for msg in reversed(self.conversation_history) if msg["role"] == role),
            "",
        )

    def get_conversation_as_messages(self) -> "List[ConversationMessage]":
        """Get conversation as structured ConversationMessage objects"""
        return [
            ConversationMessage(role=msg["role"], content=msg["content"])
            for msg in self.conversation_history
        ]
shared/src/fitness_core/services/exceptions.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Custom exceptions for the fitness app.
3
+ """
4
+
5
+
6
class FitnessAppError(Exception):
    """Root of the fitness app exception hierarchy; catch this to handle any app error."""
9
+
10
+
11
class FitnessUIError(FitnessAppError):
    """Raised when a UI-related operation fails."""
14
+
15
+
16
class AgentExecutionError(FitnessAppError):
    """Raised when executing an agent run fails."""
19
+
20
+
21
class ModelProviderError(FitnessAppError):
    """Raised when a model provider cannot be used or misbehaves."""
24
+
25
+
26
class ConversationError(FitnessAppError):
    """Raised when conversation-history management fails."""
shared/src/fitness_core/services/formatters.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Response formatting utilities for the fitness app.
3
+ """
4
+ import re
5
+ import logging
6
+ from typing import Any, Generator, List, Dict
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
class ResponseFormatter:
    """Stateless helpers that turn agent results into user-facing markdown.

    All methods are static; the class is a namespace. The markdown templates
    below are runtime output — their exact text (including emoji headers) is
    what the user sees, so they must not be reformatted casually.
    """

    @staticmethod
    def format_fitness_plan(plan_obj: Any, style: str = "default") -> str:
        """
        Format a FitnessPlan object into a structured markdown string.

        Args:
            plan_obj: The fitness plan object; expected to expose ``name``,
                ``training_plan`` and ``meal_plan`` attributes. Strings and
                other shapes are handled via fallbacks.
            style: Formatting style ("default", "minimal", "detailed");
                unrecognized values fall through to "default".

        Returns:
            Formatted markdown string (never raises; errors degrade to a
            raw-content fallback).
        """
        try:
            # Duck-type check: anything missing one of the three plan
            # attributes is not treated as a plan object.
            if not (hasattr(plan_obj, 'name') and
                    hasattr(plan_obj, 'training_plan') and
                    hasattr(plan_obj, 'meal_plan')):
                # Try to parse as string if it's not a proper object
                if isinstance(plan_obj, str):
                    return ResponseFormatter.parse_fitness_plan_from_string(plan_obj)
                else:
                    return f"**Fitness Plan**\n\n{str(plan_obj)}"

            if style == "minimal":
                return f"""**{plan_obj.name}**

**Training:** {plan_obj.training_plan}

**Meals:** {plan_obj.meal_plan}"""

            elif style == "detailed":
                return f"""# 🏋️ {plan_obj.name}

## 💪 Training Plan
{plan_obj.training_plan}

## 🥗 Meal Plan
{plan_obj.meal_plan}

## 📊 Additional Information
- Plan created with AI assistance
- Customize as needed for your preferences
- Consult healthcare providers for medical advice

---
*Your personalized fitness plan is ready! Feel free to ask any questions about the plan or request modifications.*"""

            else:  # default style
                return f"""# 🏋️ {plan_obj.name}

## 💪 Training Plan
{plan_obj.training_plan}

## 🥗 Meal Plan
{plan_obj.meal_plan}

---
*Your personalized fitness plan is ready! Feel free to ask any questions about the plan or request modifications.*"""

        except Exception as e:
            # Formatting must never crash the chat flow: log and fall back
            # to dumping the raw object.
            logger.error(f"Error formatting fitness plan: {str(e)}")
            return f"**Fitness Plan**\n\nI created a fitness plan for you, but encountered an error while formatting it. Here's the raw content:\n\n{str(plan_obj)}"

    @staticmethod
    def parse_fitness_plan_from_string(plan_str: str) -> str:
        """
        Parse a fitness plan from its ``repr``-like string representation.

        Expects text of the form ``name='...' training_plan='[...]'
        meal_plan='[...]'`` (single or double quotes; list or plain-string
        values). Missing fields degrade to empty sections.

        Args:
            plan_str: String representation of a fitness plan object.

        Returns:
            Formatted markdown string; falls back to the raw string on error.
        """
        try:
            # Extract name - handle both single and double quotes
            name_match = re.search(r"name=['\"]([^'\"]*)['\"]", plan_str)
            name = name_match.group(1) if name_match else "Fitness Plan"

            # Extract training plan - handle both list format and simple string format
            training_plan = ""

            # Try list format first (with brackets and quotes)
            training_match = re.search(r"training_plan=['\"](\[.*?\])['\"]", plan_str, re.DOTALL)
            if training_match:
                training_raw = training_match.group(1)
                # Clean up the list format.
                # NOTE(review): these chained replaces also strip apostrophes
                # and commas *inside* item text — presumably acceptable for
                # list-repr cleanup, but verify with real plan strings.
                training_plan = training_raw.replace('[', '').replace(']', '').replace("'", "").replace('"', '')
                training_plan = training_plan.replace(',', '\n').strip()
            else:
                # Try simple string format
                training_match = re.search(r"training_plan=['\"]([^'\"]*)['\"]", plan_str, re.DOTALL)
                if training_match:
                    training_plan = training_match.group(1)

            # Extract meal plan - handle both list format and simple string format
            meal_plan = ""

            # Try list format first (with brackets and quotes)
            meal_match = re.search(r"meal_plan=['\"](\[.*?\])['\"]", plan_str, re.DOTALL)
            if meal_match:
                meal_raw = meal_match.group(1)
                # Clean up the list format (same caveat as training_plan above).
                meal_plan = meal_raw.replace('[', '').replace(']', '').replace("'", "").replace('"', '')
                meal_plan = meal_plan.replace(',', '\n').strip()
            else:
                # Try simple string format
                meal_match = re.search(r"meal_plan=['\"]([^'\"]*)['\"]", plan_str, re.DOTALL)
                if meal_match:
                    meal_plan = meal_match.group(1)

            # Format as markdown
            formatted_plan = f"""# 🏋️ {name}

## 💪 Training Plan
{training_plan}

## 🥗 Meal Plan
{meal_plan}

---
*Your personalized fitness plan is ready! Feel free to ask any questions about the plan or request modifications.*"""

            return formatted_plan

        except Exception as e:
            logger.error(f"Error parsing fitness plan from string: {str(e)}")
            # Fallback to basic formatting
            return f"**Fitness Plan**\n\n{plan_str}"

    @staticmethod
    def extract_response_content(result: Any) -> str:
        """
        Extract displayable content from an agent response.

        Preference order: ``result.final_output`` (routed through the plan
        formatters when it looks like a fitness plan), then ``result.content``,
        then the value itself if it is a string, finally ``str(result)``.

        Args:
            result: Agent response object of unknown shape.

        Returns:
            Formatted response string (error text on failure, never raises).
        """
        try:
            if hasattr(result, 'final_output'):
                content = result.final_output

                # Check if this looks like a fitness plan
                if hasattr(content, 'name') and hasattr(content, 'training_plan'):
                    return ResponseFormatter.format_fitness_plan(content)
                elif isinstance(content, str) and ('training_plan=' in content or 'meal_plan=' in content):
                    # A stringified plan repr — parse it back into markdown.
                    return ResponseFormatter.parse_fitness_plan_from_string(content)
                else:
                    return str(content)
            elif hasattr(result, 'content'):
                return str(result.content)
            elif isinstance(result, str):
                return result
            else:
                return str(result)
        except Exception as e:
            logger.error(f"Error extracting content from result: {str(e)}")
            return f"Sorry, I encountered an error while formatting the response: {str(e)}"

    @staticmethod
    def stream_response(
        response: str,
        history: List[Dict],
        chunk_size: int = 3
    ) -> Generator[List[Dict], None, None]:
        """
        Stream response text with configurable chunk size for better UX.

        Appends an (initially empty) assistant message to a *copy* of
        ``history`` and yields the growing history after each chunk, giving
        the UI a typewriter effect.

        Args:
            response: Response text to stream
            history: Current chat history (not mutated; a new list is built)
            chunk_size: Number of characters per chunk

        Yields:
            Updated history with streaming response
        """
        try:
            # Add empty assistant message to history
            history = history + [{"role": "assistant", "content": ""}]

            # Stream the response character by character or in chunks
            for i in range(0, len(response), chunk_size):
                chunk = response[i:i + chunk_size]
                history[-1]["content"] += chunk
                yield history

        except Exception as e:
            logger.error(f"Error streaming response: {str(e)}")
            # Fallback to complete response.
            # NOTE(review): if streaming fails partway, `history` already
            # contains the partial assistant message, so this appends a
            # second assistant entry — confirm that is intended.
            history = history + [{"role": "assistant", "content": response}]
            yield history
shared/src/fitness_core/utils/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utilities for the fitness core library.
3
+ """
4
+ from .config import Config
5
+ from .logging import setup_logging, get_logger
6
+
7
+ __all__ = [
8
+ 'Config',
9
+ 'setup_logging',
10
+ 'get_logger'
11
+ ]
shared/src/fitness_core/utils/config.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Configuration management for the fitness app.
3
+ """
4
+ import os
5
+ from typing import Optional, Dict, Any
6
+ from dotenv import load_dotenv
7
+
8
+ # Load environment variables
9
+ load_dotenv()
10
+
11
+
12
class Config:
    """Application configuration management.

    All values are read from environment variables once, at class-body
    evaluation time, with sensible defaults for local development.
    """

    # Server configuration
    SERVER_NAME: str = os.getenv("SERVER_NAME", "0.0.0.0")
    SERVER_PORT: int = int(os.getenv("SERVER_PORT", "7860"))
    DEBUG: bool = os.getenv("DEBUG", "false").lower() == "true"

    # AI Model configuration: AI_MODEL wins over the legacy OPENAI_MODEL name.
    DEFAULT_MODEL: str = os.getenv("AI_MODEL", os.getenv("OPENAI_MODEL", "gpt-4o-mini"))
    ANTHROPIC_API_KEY: Optional[str] = os.getenv("ANTHROPIC_API_KEY")
    OPENAI_API_KEY: Optional[str] = os.getenv("OPENAI_API_KEY")

    # Logging configuration
    LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
    LOG_FILE: Optional[str] = os.getenv("LOG_FILE")

    # UI configuration
    MAX_CHAT_HISTORY: int = int(os.getenv("MAX_CHAT_HISTORY", "50"))
    STREAMING_CHUNK_SIZE: int = int(os.getenv("STREAMING_CHUNK_SIZE", "3"))

    @classmethod
    def get_gradio_config(cls) -> Dict[str, Any]:
        """Return keyword arguments for the Gradio app ``launch()`` call."""
        return {
            "server_name": cls.SERVER_NAME,
            "server_port": cls.SERVER_PORT,
            "show_error": True,
            "debug": cls.DEBUG,
        }

    @classmethod
    def has_anthropic_key(cls) -> bool:
        """Check whether a non-blank Anthropic API key is configured."""
        # Truthiness covers both None and ""; strip() rejects whitespace-only keys.
        return bool(cls.ANTHROPIC_API_KEY and cls.ANTHROPIC_API_KEY.strip())

    @classmethod
    def has_openai_key(cls) -> bool:
        """Check whether a non-blank OpenAI API key is configured."""
        return bool(cls.OPENAI_API_KEY and cls.OPENAI_API_KEY.strip())

    @classmethod
    def validate_config(cls) -> Dict[str, Any]:
        """Validate configuration and return a status report.

        Returns:
            Dict with keys ``valid`` (bool), ``warnings`` (list of str) and
            ``errors`` (list of str). ``valid`` is False only when no
            provider API key is configured at all.
        """
        status: Dict[str, Any] = {
            "valid": True,
            "warnings": [],
            "errors": []
        }

        # At least one provider key is required for the app to function.
        if not cls.has_anthropic_key() and not cls.has_openai_key():
            status["errors"].append(
                "No API keys configured. Please set ANTHROPIC_API_KEY or OPENAI_API_KEY environment variable."
            )
            status["valid"] = False

        if not cls.has_anthropic_key():
            status["warnings"].append("ANTHROPIC_API_KEY not set - Claude models will not work")

        if not cls.has_openai_key():
            status["warnings"].append("OPENAI_API_KEY not set - OpenAI models will not work")

        # Check port availability (basic check): warn outside the
        # non-privileged range; this is advisory, not a hard error.
        if not (1024 <= cls.SERVER_PORT <= 65535):
            status["warnings"].append(f"Server port {cls.SERVER_PORT} may not be valid")

        return status
shared/src/fitness_core/utils/logging.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Logging configuration for the fitness app.
3
+ """
4
+ import logging
5
+ import sys
6
+ from typing import Optional
7
+
8
+
9
def setup_logging(level: str = "INFO", log_file: Optional[str] = None) -> None:
    """
    Configure application-wide logging.

    Replaces any handlers on the root logger with a stdout handler (and an
    optional file handler), then tunes the verbosity of a few noisy
    third-party loggers.

    Args:
        level: Logging level name (DEBUG, INFO, WARNING, ERROR, CRITICAL);
            unknown names fall back to INFO.
        log_file: Optional file path to also write logs to.
    """
    # Resolve the level name; unknown names silently default to INFO.
    resolved_level = getattr(logging, level.upper(), logging.INFO)

    log_format = logging.Formatter(
        fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    root = logging.getLogger()
    root.setLevel(resolved_level)
    # Drop whatever handlers were installed before us (e.g. basicConfig).
    root.handlers.clear()

    # Always log to stdout; optionally mirror everything to a file.
    targets = [logging.StreamHandler(sys.stdout)]
    if log_file:
        targets.append(logging.FileHandler(log_file))
    for handler in targets:
        handler.setLevel(resolved_level)
        handler.setFormatter(log_format)
        root.addHandler(handler)

    # Per-library verbosity tuning.
    logging.getLogger("fitness_app").setLevel(resolved_level)
    logging.getLogger("agents").setLevel(logging.WARNING)  # Reduce noise from agents library
    logging.getLogger("httpx").setLevel(logging.WARNING)  # Reduce HTTP noise
    logging.getLogger("gradio").setLevel(logging.INFO)  # Keep Gradio info
51
+
52
+
53
def get_logger(name: str) -> logging.Logger:
    """
    Return the logger registered under *name*.

    Thin convenience wrapper around ``logging.getLogger`` so callers depend
    only on this module.

    Args:
        name: Logger name (usually ``__name__``).

    Returns:
        The (shared) logger instance for that name.
    """
    return logging.getLogger(name)