File size: 1,327 Bytes
1e0a03c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model identifiers: a small and a medium DialoGPT checkpoint, compared
# side by side in the UI below.
model_small_name = "microsoft/DialoGPT-small"
model_large_name = "microsoft/DialoGPT-medium"


@st.cache_resource
def _load_model_and_tokenizer(model_name):
    """Load and return ``(tokenizer, model)`` for *model_name*.

    Decorated with ``st.cache_resource`` so the weights are loaded once per
    server process and reused across Streamlit reruns — without this, every
    user interaction re-executes the script top-to-bottom and reloads both
    models from scratch.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model


# Module-level names kept identical to the original script for any callers.
tokenizer_small, model_small = _load_model_and_tokenizer(model_small_name)
tokenizer_large, model_large = _load_model_and_tokenizer(model_large_name)

# Function to generate responses
def generate_response(input_text, model, tokenizer):
    """Generate a single-turn chatbot reply to *input_text*.

    Parameters
    ----------
    input_text : str
        The user's message (EOS token is appended per DialoGPT convention).
    model : transformers.PreTrainedModel
        A causal-LM model exposing ``generate``.
    tokenizer : transformers.PreTrainedTokenizer
        Tokenizer matching *model*.

    Returns
    -------
    str
        The model's reply only, with special tokens stripped.
    """
    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')
    outputs = model.generate(
        inputs,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
    )
    # BUG FIX: `outputs[0]` is the prompt followed by the generated reply.
    # Decoding the whole sequence echoed the user's input back inside the
    # response; slice off the prompt tokens so only the reply is returned.
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

# ---- Streamlit UI ----
st.title("Mental Health Chatbot")

# Single-line input box; defaults to the empty string (no message yet).
user_input = st.text_input("You:", "")

if user_input:
    # Run the same prompt through both model sizes and render each answer
    # under its own heading, small model first.
    for heading, chat_model, chat_tokenizer in (
        ("DialoGPT-small Response:", model_small, tokenizer_small),
        ("DialoGPT-medium Response:", model_large, tokenizer_large),
    ):
        st.subheader(heading)
        st.write(generate_response(user_input, chat_model, chat_tokenizer))