import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, pipeline
# from llama_cpp import Llama
from datasets import load_dataset
import os
import requests

# Replace with the direct image URL
flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"

# Inject custom CSS for the background with a centered and blurred image.
# NOTE: the original <style> block was stripped when this file was extracted;
# the rules below are a minimal reconstruction of "centered and blurred
# background image", not the original styling.
st.markdown(
    f"""
    <style>
    .blur-background {{
        position: fixed;
        inset: 0;
        background-image: url("{flower_image_url}");
        background-position: center;
        background-size: cover;
        filter: blur(8px);
        z-index: -1;
    }}
    </style>
    """,
    unsafe_allow_html=True
)

# Add the blurred background div (the div markup was also stripped; this is
# the minimal element the CSS class above targets)
st.markdown('<div class="blur-background"></div>', unsafe_allow_html=True)
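# ---------------------------------------------------------------------------
# Alternative inference path hinted at by the commented-out llama_cpp import
# above: running a quantized GGUF build of the model locally instead of
# fine-tuning in-process. A minimal sketch only -- the model_path below is a
# hypothetical placeholder, not a file this app ships:
#
#   from llama_cpp import Llama
#   llm = Llama(model_path="./models/mistral-7b-instruct.Q4_K_M.gguf", n_ctx=2048)
#   out = llm.create_chat_completion(
#       messages=[{"role": "user", "content": "I feel anxious lately."}]
#   )
#   print(out["choices"][0]["message"]["content"])
# ---------------------------------------------------------------------------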
# """"""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""

# Load the dataset
@st.cache_resource
def load_counseling_dataset():
    return load_dataset("Amod/mental_health_counseling_conversations")

dataset = load_counseling_dataset()

# Fine-tune the model and save it
@st.cache_resource
def fine_tune_model():
    from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling

    # Load base model and tokenizer
    model_name = "prabureddy/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Prepare dataset for training. This dataset only provides a "train"
    # split, so hold out 10% for evaluation; its columns are capitalized
    # ("Context" / "Response").
    split = dataset["train"].train_test_split(test_size=0.1)

    def preprocess_function(examples):
        # With batched=True each field is a list, so join the pairs per example
        texts = [c + "\n" + r for c, r in zip(examples["Context"], examples["Response"])]
        return tokenizer(texts, truncation=True)

    tokenized_datasets = split.map(
        preprocess_function,
        batched=True,
        remove_columns=split["train"].column_names,
    )
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    # Training arguments
    training_args = TrainingArguments(
        output_dir="./fine_tuned_model",
        evaluation_strategy="epoch",
        learning_rate=2e-5,
        per_device_train_batch_size=1,
        num_train_epochs=3,
        weight_decay=0.01,
        save_total_limit=2,
        save_strategy="epoch"
    )

    # Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    trainer.train()

    # Save the fine-tuned model
    trainer.save_model("./fine_tuned_model")
    tokenizer.save_pretrained("./fine_tuned_model")
    return "./fine_tuned_model"

# Load or fine-tune the model
model_dir = fine_tune_model()

# Load the fine-tuned model for inference
@st.cache_resource
def load_pipeline(model_dir):
    return pipeline("text-generation", model=model_dir)

pipe = load_pipeline(model_dir)

# Streamlit App
st.title("Mental Health Support Assistant")
st.markdown("""
Welcome to the **Mental Health Support Assistant**. This tool helps detect potential
mental health concerns based on user input and provides **uplifting and positive
suggestions** to boost morale.
""")

# User input for mental health concerns
user_input = st.text_area("Please share your concern:", placeholder="Type your question or concern here...")

if st.button("Get Supportive Response"):
    if user_input.strip():
        with st.spinner("Analyzing your input and generating a response..."):
            try:
                # Construct the chat messages for the pipeline
                messages = [{"role": "user", "content": user_input}]
                # Generate a response. With chat-style input, "generated_text"
                # holds the full message list; the assistant's reply is the
                # last entry. max_new_tokens avoids the very short default limit.
                output = pipe(messages, max_new_tokens=256)
                response = output[0]["generated_text"][-1]["content"]
                st.subheader("Supportive Suggestion:")
                st.markdown(f"**{response}**")
            except Exception as e:
                st.error(f"An error occurred while generating the response: {e}")
    else:
        st.error("Please enter a concern to receive suggestions.")

# Sidebar for additional resources
st.sidebar.header("Additional Resources")
st.sidebar.markdown("""
- [Mental Health Foundation](https://www.mentalhealth.org)
- [Mind](https://www.mind.org.uk)
- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
""")
st.sidebar.info("This application is not a replacement for professional counseling. If you're in crisis, seek professional help immediately.")
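# ---------------------------------------------------------------------------
# Usage (assumption: this file is saved as app.py; the command below is the
# standard Streamlit entry point, not anything specific to this app):
#
#   streamlit run app.py
#
# Quick smoke test of the inference path outside Streamlit -- a minimal
# sketch assuming the fine-tuned checkpoint already exists in
# ./fine_tuned_model and a transformers version with chat-template support:
#
#   from transformers import pipeline
#   pipe = pipeline("text-generation", model="./fine_tuned_model")
#   out = pipe([{"role": "user", "content": "I feel anxious lately."}], max_new_tokens=128)
#   print(out[0]["generated_text"][-1]["content"])
# ---------------------------------------------------------------------------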