Update app.py
app.py CHANGED
@@ -6,10 +6,10 @@ from dotenv import load_dotenv
 # Load environment variables from the .env file
 load_dotenv()

-# Get the API key from the .env file
-…
+# Get the Gemini API key from the .env file
+GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

-if …
+if GEMINI_API_KEY is None:
     st.error("API key not found! Please set the GEMINI_API_KEY in your .env file.")
     st.stop()

@@ -22,69 +22,83 @@ questions = [

 # Function to query the Gemini API
 def query_gemini_api(user_answers):
-    …
-    url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-latest:generateContent?key=AIzaSyCP1yUFUGmJqlcb2laZRXpqvJldTNEcSbQ"
-    …
+    url = f'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-latest:generateContent?key={GEMINI_API_KEY}'
     headers = {'Content-Type': 'application/json'}

-    # …
-    input_text = " ".join(user_answers)
+    # Prepare the payload with user answers
+    input_text = " ".join(user_answers)  # Combining all answers into one input text

-    # Payload for the API request
     payload = {
         "contents": [
             {
                 "parts": [
-                    {"text": …
+                    {"text": input_text}
                 ]
             }
         ]
     }

     try:
-        # Send …
+        # Send the request to the Gemini API
         response = requests.post(url, headers=headers, json=payload)

+        # Log the response for debugging
+        print(f"Status Code: {response.status_code}")  # Log the status code
+        print(f"Response Text: {response.text}")  # Log the response text
+
+        # Check if the API call is successful
         if response.status_code == 200:
             result = response.json()
-            …
-            # …
-            …
-            …
+
+            # Check if the response contains valid mood and recommendations
+            mood = result.get("mood", None)
+            recommendations = result.get("recommendations", None)
+
+            if mood and recommendations:
+                return mood, recommendations
+            else:
+                st.error("No mood or recommendations found in the response.")
+                return None, None
         else:
-            # Handle API error
             st.error(f"API Error {response.status_code}: {response.text}")
-            return None
+            return None, None
     except requests.exceptions.RequestException as e:
         st.error(f"An error occurred: {e}")
-        return None
+        return None, None

-# Streamlit app
+# Streamlit app for collecting answers
 def main():
     st.title("Mood Analysis and Suggestions")
+
     st.write("Answer the following 3 questions to help us understand your mood:")

-    # Collect user …
+    # Collect responses from the user
     responses = []
     for i, question in enumerate(questions):
         response = st.text_input(f"{i+1}. {question}")
         if response:
             responses.append(response)

-    # …
+    # If all 3 responses are collected, send them to Gemini for analysis
     if len(responses) == len(questions):
         st.write("Processing your answers...")

-        # …
-        recommendations = query_gemini_api(responses)
+        # Get mood and recommendations from Gemini API
+        mood, recommendations = query_gemini_api(responses)
+
+        if mood and recommendations:
+            # Display the detected mood
+            st.write(f"Detected Mood: {mood}")

-        …
+            # Display the recommendations
             st.write("### Recommendations to Improve Your Mood:")
-            …
+            for recommendation in recommendations:
+                st.write(f"- {recommendation}")
         else:
+            # If no valid mood or recommendations are found, show a message
             st.warning("Could not generate mood analysis. Please try again later.")
     else:
-        st. …
+        st.write("Please answer all 3 questions to receive suggestions.")

 if __name__ == "__main__":
     main()
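The first hunk stops embedding the key in the request URL and reads it from the environment instead. As a reference, here is a minimal sketch of how load_dotenv() and os.getenv() fit together; the .env contents and the key value are placeholders, not part of this commit:

# .env (hypothetical contents, kept out of version control)
# GEMINI_API_KEY=your-key-here

import os
from dotenv import load_dotenv

load_dotenv()                                  # copies the variables from .env into the process environment
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")   # returns None when the variable is not set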
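One thing to watch: the new query_gemini_api() reads mood and recommendations as top-level keys of the response body, while a generateContent response normally nests the generated text under candidates -> content -> parts. Below is a minimal parsing sketch under that assumption; extract_text() and parse_mood_and_recommendations() are hypothetical helpers, and the second one only yields both fields if the prompt explicitly asks the model to reply with a JSON object of the form {"mood": ..., "recommendations": [...]}:

import json

def extract_text(result):
    # Assumed response shape: {"candidates": [{"content": {"parts": [{"text": "..."}]}}], ...}
    try:
        return result["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError):
        return None

def parse_mood_and_recommendations(result):
    # Hypothetical helper: expects the model's reply to be a JSON object such as
    # {"mood": "stressed", "recommendations": ["take a short walk", "get more sleep"]}
    text = extract_text(result)
    if text is None:
        return None, None
    try:
        data = json.loads(text)
        return data.get("mood"), data.get("recommendations")
    except json.JSONDecodeError:
        return None, None

With a helper like this in place, the two result.get(...) lines in the committed function could be swapped for a single call to parse_mood_and_recommendations(result).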