Kathirsci committed on
Commit
bdd95b8
·
verified ·
1 Parent(s): ae9cd66

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -57
app.py CHANGED
@@ -1,68 +1,32 @@
1
- #To add temperature control, increase token lengths, and restrictions to provide suggestions to visit nearby therapists, you can modify the chatbot function as follows:
2
-
3
- #app.py (Streamlit version with temperature control and restrictions)
4
-
5
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load a pre-trained causal language model and its tokenizer once at import
# time so every request reuses them.
# BUG FIX: the original loaded "bert-base-uncased" here, but BERT is an
# encoder-only masked-LM checkpoint — AutoModelForCausalLM attaches a randomly
# initialised LM head to it, so .generate() produced meaningless text.
# "gpt2" is a genuine causal LM and works with the same generate() call.
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
12
-
13
def chatbot(input_message, temperature=0.7, max_length=50):
    """Return a reply for *input_message*.

    A few fixed intents (greeting, help, therapist referral) are answered by
    keyword rules; anything else falls through to free-form generation with
    the module-level ``model``/``tokenizer``.

    Args:
        input_message: Raw user text.
        temperature: Sampling temperature for generated replies.
        max_length: Maximum token length of generated replies.

    Returns:
        The reply as a plain string.
    """
    # System roles
    if input_message.lower() == "hello":
        return "Welcome to the Mental Health Chatbot! I'm here to listen and help."
    elif input_message.lower() == "help":
        return "I'm here to provide support and resources for mental health. What's on your mind?"
    # Assistant roles
    elif input_message.lower().startswith("what is"):
        return "I can provide information on various mental health topics. Please ask a specific question."
    elif input_message.lower().startswith("how to"):
        return "I can offer suggestions and resources for managing mental health. Please ask a specific question."
    # Therapist suggestion
    elif input_message.lower().startswith("i need help"):
        return "Consider visiting a nearby therapist for professional support. You can search online for therapists in your area."
    # Default response: free-form model generation
    else:
        input_ids = tokenizer.encode(input_message, return_tensors="pt")
        outputs = model.generate(
            input_ids=input_ids,
            max_length=max_length,
            num_return_sequences=1,
            # BUG FIX: without do_sample=True, generate() decodes greedily and
            # silently ignores temperature/top_k/top_p.
            do_sample=True,
            temperature=temperature,
            top_k=50,
            top_p=0.95,
            repetition_penalty=1.2,
            no_repeat_ngram_size=3,
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Restrict responses to mental health topics
        if "mental health" not in response.lower():
            response = "I'm here to provide support and resources for mental health. Please ask a specific question."
        return response
45
 
46
def main():
    """Streamlit entry point: text input, sampling sliders, and a Send button."""
    st.title("Mental Health Chatbot")
    user_text = st.text_input("You:")
    temp = st.slider("Temperature", 0.1, 1.0, 0.7)
    length_cap = st.slider("Max Length", 50, 100, 50)
    if st.button("Send"):
        reply = chatbot(user_text, temp, length_cap)
        st.text_area("Chatbot:", value=reply, height=100)


if __name__ == "__main__":
    main()
57
 
58
- # Run the app with: streamlit run app.py
59
- '''
60
- Here's what's changed:
61
-
62
- - I added two sliders to control the temperature and max length of the responses.
63
- - I added a conditional statement to suggest visiting a nearby therapist if the user inputs "I need help".
64
- - I restricted the responses to mental health topics by checking if the response contains "mental health".
65
-
66
- You can adjust the temperature and max length sliders to control the creativity and length of the responses. The temperature parameter controls how likely the model is to generate novel responses, while the max length parameter controls the maximum length of the responses.
67
-
68
- Note that you may need to adjust the model and tokenizer to better fit your specific use case. Additionally, you may want to consider adding more restrictions and guidelines to ensure the chatbot provides appropriate and safe responses.'''
 
 
 
 
 
1
import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the pre-trained T5-base model and tokenizer once at import time so
# every request reuses them.
# BUG FIX: AutoModelWithLMHead is deprecated (removed in transformers v5);
# AutoModelForSeq2SeqLM is the correct auto-class for T5 checkpoints and
# yields the same model for "t5-base".
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
tokenizer = AutoTokenizer.from_pretrained("t5-base")
7
+
8
def chatbot(input_message, temperature=0.7, max_length=50):
    """Generate a reply to *input_message* with the module-level T5 model.

    Args:
        input_message: Raw user text.
        temperature: Sampling temperature (higher = more varied replies).
            Defaults to the previously hard-coded 0.7.
        max_length: Maximum token length of the generated reply.
            Defaults to the previously hard-coded 50.

    Returns:
        The decoded model reply as a plain string.
    """
    # T5 expects a task prefix before the input text.
    # NOTE(review): "generate text:" is not one of T5's trained task
    # prefixes (e.g. "summarize:", "translate English to German:") —
    # confirm this is the intended prompt.
    input_ids = tokenizer.encode(f"generate text: {input_message}", return_tensors="pt")
    outputs = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        num_return_sequences=1,
        # BUG FIX: without do_sample=True, generate() decodes greedily and
        # silently ignores temperature/top_k/top_p.
        do_sample=True,
        temperature=temperature,
        top_k=50,
        top_p=0.95,
        repetition_penalty=1.2,
        no_repeat_ngram_size=3,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
def main():
    """Streamlit entry point: one text input, one Send button, one reply box."""
    st.title("Mental Health Chatbot")
    user_text = st.text_input("You:")
    if st.button("Send"):
        reply = chatbot(user_text)
        st.text_area("Chatbot:", value=reply, height=100)


if __name__ == "__main__":
    main()
32