File size: 5,589 Bytes
3801ee5
 
bccdd0d
3801ee5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9061c2c
 
 
 
 
 
3801ee5
 
 
 
 
 
 
 
 
 
 
 
9061c2c
3801ee5
 
 
 
 
 
 
 
 
9061c2c
 
 
3801ee5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d227682
3801ee5
bccdd0d
3801ee5
 
 
 
 
bccdd0d
3801ee5
 
 
 
 
bccdd0d
3801ee5
 
 
 
 
 
 
 
bccdd0d
3801ee5
 
 
bccdd0d
3801ee5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
#!/usr/bin/env python3
"""
Hockey Mind AI Chatbot - Fixed Gradio Interface for Hugging Face Spaces
"""
import asyncio
import html
import os

import gradio as gr
from dotenv import load_dotenv

from OpenAPI_DB import agentic_hockey_chat

# Load environment variables from a local .env file (no-op if the file is absent)
load_dotenv()

# Lazy-load flag: ML resources are only pulled in on the first chat request
# (keeps startup memory low on Hugging Face Spaces)
resources_loaded = False

async def chat_interface(user_role, user_team, user_prompt):
    """Handle one chat turn for the Gradio UI.

    Lazily loads the ML resources on the first call, delegates to
    ``agentic_hockey_chat``, and formats the result for the two Gradio
    output components.

    Args:
        user_role: Role selected in the UI (e.g. "Coach").
        user_team: Free-text team/level description (e.g. "U10").
        user_prompt: The user's question.

    Returns:
        Tuple of (AI response text, recommendations HTML string).
    """
    global resources_loaded

    try:
        # Load heavy resources on first use to save memory at startup.
        if not resources_loaded:
            try:
                from OpenAPI_DB import load_resources
                load_resources()
                resources_loaded = True
            except ImportError as import_err:
                return f"Import Error: {str(import_err)}. Please check if all required packages are installed.", "Unable to load ML models."

        # Call the main chat function
        result = await agentic_hockey_chat(user_role, user_team, user_prompt)

        ai_response = result.get('ai_response', 'Sorry, no response generated.')
        recommendations = result.get('recommended_content_details', [])

        # Format the top-5 recommendations as an HTML list. Titles and URLs
        # come from external content, so escape them to avoid broken markup
        # or HTML injection in the rendered page.
        rec_html = ""
        if recommendations:
            items = []
            for rec in recommendations[:5]:
                title = html.escape(str(rec.get('title', 'No title')))
                url = html.escape(str(rec.get('url', '#')), quote=True)
                similarity = rec.get('similarity', 0)
                items.append(
                    f"<li><a href='{url}' target='_blank'>{title}</a> "
                    f"(Similarity: {similarity:.3f})</li>"
                )
            rec_html = "<h3>πŸ’ Recommended Videos:</h3><ul>" + "".join(items) + "</ul>"

        return ai_response, rec_html

    except Exception as e:
        # Top-level boundary: surface the traceback in the UI for debugging
        # rather than crashing the Gradio handler.
        import traceback
        error_details = traceback.format_exc()
        return f"Error: {str(e)}\n\nDetails:\n{error_details}", "No recommendations available due to error."

def sync_chat_interface(user_role, user_team, user_prompt):
    """Blocking wrapper so Gradio can invoke the async chat handler."""
    coroutine = chat_interface(user_role, user_team, user_prompt)
    return asyncio.run(coroutine)

# Gradio Interface
with gr.Blocks(
    title="πŸ’ Hockey Mind AI Chatbot",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {max-width: 800px !important; margin: auto !important;}
    .main-header {text-align: center; margin-bottom: 2rem;}
    """
) as demo:
    
    gr.HTML("""
    <div class="main-header">
        <h1>πŸ’ Hockey Mind AI Chatbot</h1>
        <p>Get personalized hockey advice and video recommendations!</p>
        <p><i>Optimized for field hockey coaching, training, and player development</i></p>
    </div>
    """)
    
    with gr.Row():
        with gr.Column():
            # Choices must include the default value and every role used in
            # the Examples rows below, otherwise Gradio rejects the value.
            # (The original choices lacked "Coach", the default.)
            user_role = gr.Dropdown(
                choices=["Coach", "Player", "Parent", "le Trainer", "le Coach", "Speler"],
                label="Your Role πŸ‘€",
                value="Coach"
            )
            
            user_team = gr.Textbox(
                label="Team/Level πŸ’",
                placeholder="e.g., U8C, Toronto Maple Leafs, Beginner",
                value="U10"
            )
            
            user_prompt = gr.Textbox(
                label="Your Question ❓",
                placeholder="Ask about drills, techniques, strategies, rules...",
                lines=3
            )
            
            submit_btn = gr.Button("Get Hockey Advice πŸš€", variant="primary", size="lg")
    
    with gr.Row():
        ai_response = gr.Textbox(
            label="πŸ€– AI Response",
            lines=8,
            interactive=False
        )
    
    with gr.Row():
        recommendations = gr.HTML()
    
    # Examples section
    gr.HTML("<br><h3>πŸ’‘ Example Questions:</h3>")
    
    examples = gr.Examples(
        examples=[
            ["Coach", "U8C", "What are the best backhand shooting drills for young players?"],
            ["Player", "Intermediate", "How can I improve my penalty corner technique?"],
            ["le Coach", "U10", "Geef me oefeningen voor backhandschoten"],
            ["Parent", "Beginner", "What equipment does my child need to start playing hockey?"],
            ["Coach", "Advanced", "What are effective small-sided games for skill development?"],
        ],
        inputs=[user_role, user_team, user_prompt],
        outputs=[ai_response, recommendations],
        fn=sync_chat_interface,
    )
    
    # Event handlers: button click and Enter in the question box both submit
    submit_btn.click(
        fn=sync_chat_interface,
        inputs=[user_role, user_team, user_prompt],
        outputs=[ai_response, recommendations],
        api_name="chat"
    )
    
    user_prompt.submit(
        fn=sync_chat_interface,
        inputs=[user_role, user_team, user_prompt],
        outputs=[ai_response, recommendations]
    )
    
    # Footer
    gr.HTML("""
    <br>
    <div style="text-align: center; color: #666; font-size: 0.9em;">
        <p>πŸ’ Hockey Mind AI - Powered by OpenRouter & Sentence Transformers</p>
        <p>Supports English & Dutch | Built for field hockey community</p>
    </div>
    """)

# Launch configuration for Hugging Face Spaces
if __name__ == "__main__":
    # SPACE_ID is set (non-empty) only when running on Hugging Face Spaces.
    if os.getenv("SPACE_ID"):
        # Production: bind to all interfaces on the Spaces-required port.
        launch_kwargs = dict(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            show_error=True,
            quiet=False,
        )
    else:
        # Local development: let Gradio pick the port and create a share link.
        launch_kwargs = dict(
            share=True,
            show_error=True,
        )
    demo.launch(**launch_kwargs)