#!/bin/bash
# Container/entrypoint script: (optionally) start Ollama, then launch the
# Streamlit app in the foreground.
#
# NOTE(review): the original file had lost all its newlines, so everything
# after the shebang sat on one line behind a '#' and the script was a no-op.
# This restores the intended multi-line structure.

set -euo pipefail

# Start Ollama in the background (currently disabled).
# ollama serve &

# Wait for Ollama to be ready (check if port 11434 is open) — disabled.
echo "Waiting for Ollama to start..."
# timeout 30s bash -c "until curl -s http://localhost:11434 > /dev/null; do sleep 1; done"
# if [ $? -eq 0 ]; then
#   echo "Ollama is running."
# else
#   echo "Failed to start Ollama within 30 seconds."
#   exit 1
# fi

# Pull the model (llama3.2) — disabled; image is expected to ship with it.
echo "Pulling llama3.2 model..."
# ollama pull llama3.2:1b

# Start Streamlit in the foreground. 'exec' replaces the shell so Streamlit
# becomes PID 1's child and receives container stop signals directly.
echo "Starting Streamlit..."
exec streamlit run app.py \
  --server.headless true \
  --server.enableCORS false \
  --server.enableXsrfProtection false \
  --server.fileWatcherType none