Upload 13 files
Files changed:
- .gitattributes +35 -35
- .gitignore +1 -0
- README.md +15 -14
- requirements.txt +4 -0
- src/__pycache__/main.cpython-312.pyc +0 -0
- src/app.py +23 -0
- src/streamlit_app/__pycache__/home_page.cpython-312.pyc +0 -0
- src/streamlit_app/__pycache__/model.cpython-312.pyc +0 -0
- src/streamlit_app/home_page.py +59 -0
- src/streamlit_app/model.py +30 -0
- src/streamlit_app/streamlit_app.lnk +0 -0
- src/utils/__pycache__/logger.cpython-312.pyc +0 -0
- src/utils/logger.py +22 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
All 35 Git LFS rules were removed and re-added with identical content (likely a whitespace or line-ending change only):
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1 @@
+.venv
README.md
CHANGED
@@ -1,14 +1,15 @@
----
-title: Physio
-emoji:
-colorFrom:
-colorTo:
-sdk: streamlit
-sdk_version: 1.42.2
-app_file: app.py
-pinned: false
-
-
-
-
-
+---
+title: Physio-Flex
+emoji: 🏃
+colorFrom: gray
+colorTo: indigo
+sdk: streamlit
+sdk_version: 1.42.2
+app_file: src/app.py
+pinned: false
+thumbnail: >-
+  https://cdn-uploads.huggingface.co/production/uploads/67416a9706f2e52e3556570c/24C2U-iZbfg311-ncaSez.jpeg
+short_description: 'An helpful ai agent that deals in physiotherapy '
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+streamlit
+pandas
+groq
+scikit-learn
src/__pycache__/main.cpython-312.pyc
ADDED
Binary file (434 Bytes).
src/app.py
ADDED
@@ -0,0 +1,23 @@
+import streamlit as st
+from streamlit_app import model, home_page
+import numpy as np
+from sklearn.metrics.pairwise import cosine_similarity
+import time
+
+
+def main():
+
+    home_page.page_config()
+
+    home_page.page_title()
+
+    username = home_page.get_or_greet_user_name()
+
+    if username:
+
+        home_page.display_chat()
+
+        home_page.handle_user_input()
+
+if __name__ == "__main__":
+    main()
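src/app.py imports streamlit_app and utils as top-level packages, which resolves because the main script's directory (src/) ends up on the import path when the Space runs app_file: src/app.py. A minimal sketch, assuming you want to import the same modules from the repository root outside Streamlit for a quick check; note that importing streamlit_app.model additionally requires GROQ_API_KEY to be set:

import sys
from pathlib import Path

# Make src/ importable so `streamlit_app` and `utils` resolve the same way
# they do when src/app.py is the Streamlit entry point.
sys.path.insert(0, str(Path("src").resolve()))

from utils import logger  # imported after the sys.path tweak

logger.log("info", "src/ is importable outside Streamlit")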
src/streamlit_app/__pycache__/home_page.cpython-312.pyc
ADDED
Binary file (5.15 kB).
src/streamlit_app/__pycache__/model.cpython-312.pyc
ADDED
Binary file (1.62 kB).
src/streamlit_app/home_page.py
ADDED
@@ -0,0 +1,59 @@
+import streamlit as st
+from streamlit_app import model
+from utils import logger
+
+
+def page_config():
+    st.set_page_config(page_title="Physio Assist", layout="wide")
+    logger.log("info","Configuring the page layout and title")
+
+def page_title():
+    st.title("🌿🤸♂️ Physio-Flex: Your Path to a Healthier, Stronger You")
+    logger.log("info", "Setting the title to '🌿🤸♂️ Physio-Flex: Your Path to a Healthier, Stronger You'")
+
+def get_or_greet_user_name():
+    if 'user_name' not in st.session_state:
+        st.session_state.user_name = None
+        logger.log("info","user_name not found in session_state, setting to None.")
+
+    if st.session_state.user_name is None:
+        logger.log("info","user_name is None, requesting user input.")
+        user_name = st.text_input("Please let me know your name:",
+                                  placeholder="Enter your name buddy")
+        if user_name:
+            st.session_state.user_name = user_name
+            logger.log("info", f"User entered name: {user_name}. Setting session_state.user_name.")
+            st.rerun()
+    else:
+        logger.log("info", f"User already entered a name: {st.session_state.user_name}. Displaying greeting.")
+        return st._bottom.subheader(f"Hello {st.session_state.user_name}! How can I assist you today?")
+
+def display_chat():
+    logger.log("info","Displaying the chat history.")
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+    logger.log("info", f"Displayed {len(st.session_state.messages)} messages from the chat history.")
+
+def handle_user_input():
+    logger.log("info", "Waiting for user input...")
+    prompt = st.chat_input("Ask me anything related to physiotherapy. E.g., 'How can I recover from a sprained ankle?'")
+
+    if prompt:
+        with st.chat_message("user"):
+            st.markdown(prompt)
+        if prompt:
+            st.session_state.messages.append({"role": "user", "content": prompt})
+
+        with st.spinner("Processing your query..."):
+            try:
+                response = model.get_physiotherapy_assistant_response(prompt)
+                with st.chat_message("assistant"):
+                    st.markdown(response)
+                st.session_state.messages.append({"role": "assistant", "content": response})
+                logger.log("info",f"Assistant response: {response}")
+            except Exception as e:
+                st.error(f"An error occurred while processing your query: {str(e)}")
+                logger.log("error", f"Error processing user query: {str(e)}")
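handle_user_input() appends to st.session_state.messages, which is only created inside display_chat(), so the code relies on display_chat() running first in the same script run (main() in src/app.py does call them in that order). A minimal defensive sketch, assuming you want the helpers to be order-independent; ensure_message_state is a hypothetical helper, not part of this commit:

import streamlit as st

def ensure_message_state():
    # Create the chat history list on first use so any helper can run first.
    if "messages" not in st.session_state:
        st.session_state.messages = []

Calling ensure_message_state() at the top of both display_chat() and handle_user_input() would remove the ordering assumption.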
src/streamlit_app/model.py
ADDED
@@ -0,0 +1,30 @@
+import os
+from groq import Groq
+
+# Fetch API key from environment variables
+api_key = os.getenv("GROQ_API_KEY")
+
+if not api_key:
+    raise ValueError("API key not found. Please set it in the Hugging Face Secrets.")
+
+client = Groq(api_key=api_key)
+
+def get_physiotherapy_assistant_response(prompt: str):
+    try:
+        system_message = """You are a helpful physiotherapy assistant, trained to provide useful information about exercises, recovery, and treatments.
+        You can help users with various physical therapy-related queries."""
+
+        if not prompt or len(prompt.strip()) < 5:
+            return "Please provide more details about your physiotherapy question. I need more context to assist you effectively."
+
+        chat_completion = client.chat.completions.create(
+            messages=[
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": prompt},
+            ],
+            model="llama-3.3-70b-versatile",
+        )
+        assistant_response = chat_completion.choices[0].message.content
+        return assistant_response
+    except Exception as e:
+        return f"An error occurred while processing your request: {str(e)}"
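Because the Groq client is created at import time and the module raises if GROQ_API_KEY is missing, it can be smoke-tested outside Streamlit. A minimal sketch, assuming the file is placed in src/ (so streamlit_app resolves) and GROQ_API_KEY is exported; the file name smoke_test.py is hypothetical:

# smoke_test.py
from streamlit_app import model

if __name__ == "__main__":
    # Use a prompt longer than 5 characters so the length guard is passed.
    reply = model.get_physiotherapy_assistant_response(
        "What exercises help with a sprained ankle?"
    )
    print(reply)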
src/streamlit_app/streamlit_app.lnk
ADDED
Binary file (1.59 kB).
src/utils/__pycache__/logger.cpython-312.pyc
ADDED
Binary file (1.21 kB).
src/utils/logger.py
ADDED
@@ -0,0 +1,22 @@
+import logging
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s',
+    datefmt='%Y-%m-%d %H:%M')
+
+def log(log_type,message):
+    try:
+        log_type = log_type.lower()
+        if log_type == "info":
+            return logging.info(message)
+        elif log_type == "debug":
+            return logging.debug(message)
+        elif log_type == "warning":
+            return logging.warning(message)
+        elif log_type == "critical":
+            return logging.critical(message)
+        elif log_type == "error":
+            return logging.error(message)
+    except Exception as e:
+        return f"Invalid log type. Error: {e}"
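A minimal alternative sketch, not part of the commit, that maps level names through the stdlib's own constants instead of an if/elif chain:

import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M",
)

def log(log_type: str, message: str) -> None:
    # getattr resolves "info" -> logging.INFO, "warning" -> logging.WARNING, etc.,
    # and falls back to INFO for unknown names.
    level = getattr(logging, log_type.upper(), logging.INFO)
    logging.log(level, message)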