Update app.py
Browse files
app.py
CHANGED
@@ -1,3 +1,7 @@
|
|
|
|
|
|
|
|
|
|
1 |
import streamlit as st
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
3 |
import torch
|
@@ -6,20 +10,59 @@ import torch
|
|
6 |
model = AutoModelForCausalLM.from_pretrained("bert-base-uncased")
|
7 |
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
8 |
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
|
17 |
def main():
|
18 |
st.title("Mental Health Chatbot")
|
19 |
input_message = st.text_input("You:")
|
|
|
|
|
20 |
if st.button("Send"):
|
21 |
-
response = chatbot(input_message)
|
22 |
st.text_area("Chatbot:", value=response, height=100)
|
23 |
|
24 |
if __name__ == "__main__":
|
25 |
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# To add temperature control, longer token lengths, and topic restrictions (with a suggestion to visit a nearby therapist), modify the chatbot function as follows:
|
2 |
+
|
3 |
+
#app.py (Streamlit version with temperature control and restrictions)
|
4 |
+
|
5 |
import streamlit as st
|
6 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
7 |
import torch
|
|
|
10 |
# BUG FIX: "bert-base-uncased" is a masked (bidirectional) LM, not a causal LM.
# Loading it with AutoModelForCausalLM leaves the generation head randomly
# initialized, so model.generate() produces garbage. Use a causal model that
# was actually trained for conversational generation instead.
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
|
12 |
|
13 |
+
def chatbot(input_message, temperature=0.7, max_length=50):
    """Return the chatbot's reply to *input_message*.

    A few keyword patterns get canned, safety-oriented responses; everything
    else is generated by the language model and then filtered so the reply
    stays on mental-health topics.

    Args:
        input_message: Raw user text.
        temperature: Sampling temperature for model generation
            (higher = more varied output).
        max_length: Maximum token length of a generated reply.

    Returns:
        A response string.
    """
    # Normalize once instead of calling .lower() in every branch.
    text = input_message.lower()

    # System roles
    if text == "hello":
        return "Welcome to the Mental Health Chatbot! I'm here to listen and help."
    if text == "help":
        return "I'm here to provide support and resources for mental health. What's on your mind?"
    # Assistant roles
    if text.startswith("what is"):
        return "I can provide information on various mental health topics. Please ask a specific question."
    if text.startswith("how to"):
        return "I can offer suggestions and resources for managing mental health. Please ask a specific question."
    # Therapist suggestion
    if text.startswith("i need help"):
        return "Consider visiting a nearby therapist for professional support. You can search online for therapists in your area."

    # Default response: generate with the language model.
    input_ids = tokenizer.encode(input_message, return_tensors="pt")
    outputs = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        num_return_sequences=1,
        # BUG FIX: without do_sample=True, generate() uses greedy decoding and
        # silently ignores temperature, top_k, and top_p.
        do_sample=True,
        temperature=temperature,
        top_k=50,
        top_p=0.95,
        repetition_penalty=1.2,
        no_repeat_ngram_size=3,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Restrict generated replies to mental-health topics; this keyword check is
    # a crude filter — NOTE(review): consider a real topic classifier.
    if "mental health" not in response.lower():
        response = "I'm here to provide support and resources for mental health. Please ask a specific question."
    return response
|
45 |
|
46 |
def main():
    """Render the Streamlit UI and route user input through chatbot()."""
    st.title("Mental Health Chatbot")
    user_text = st.text_input("You:")
    # Generation controls exposed to the user: sampling temperature and
    # maximum reply length.
    temp = st.slider("Temperature", 0.1, 1.0, 0.7)
    length = st.slider("Max Length", 50, 100, 50)
    if st.button("Send"):
        reply = chatbot(user_text, temp, length)
        st.text_area("Chatbot:", value=reply, height=100)
|
54 |
|
55 |
# Script entry point: run the Streamlit app (streamlit run app.py).
if __name__ == "__main__":
    main()
|
57 |
+
|
58 |
+
# Run the app with: streamlit run app.py
|
59 |
+
'''
|
60 |
+
Here's what's changed:
|
61 |
+
|
62 |
+
- I added two sliders to control the temperature and max length of the responses.
|
63 |
+
- I added a conditional statement to suggest visiting a nearby therapist if the user inputs "I need help".
|
64 |
+
- I restricted the responses to mental health topics by checking if the response contains "mental health".
|
65 |
+
|
66 |
+
You can adjust the temperature and max length sliders to control the creativity and length of the responses. The temperature parameter controls how likely the model is to generate novel responses, while the max length parameter controls the maximum length of the responses.
|
67 |
+
|
68 |
+
Note that you may need to adjust the model and tokenizer to better fit your specific use case. Additionally, you may want to consider adding more restrictions and guidelines to ensure the chatbot provides appropriate and safe responses.'''
|