File size: 1,081 Bytes
cc0280c 344126d cc0280c dd53a96 344126d c0c1896 344126d dd53a96 c0c1896 344126d cc0280c dd53a96 cc0280c dd53a96 344126d dd53a96 344126d cc0280c 344126d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
#!/bin/bash
# Container entry point: start the Ollama daemon, wait until its HTTP API
# answers, ensure the llama3-zh model is registered (creating it from the
# bundled GGUF weights if needed), then exec the Gradio application so it
# replaces this shell and receives signals directly.
set -euo pipefail

readonly OLLAMA_LOG=/app/ollama.log
readonly OLLAMA_VERSION_URL=http://127.0.0.1:11434/api/version
readonly MODEL_FILE=/app/model.gguf
readonly MODEL_NAME=llama3-zh
readonly MODELFILE=/app/Modelfile

# Start the Ollama service in the background; keep its output for diagnostics.
echo "Starting Ollama service..."
ollama serve > "$OLLAMA_LOG" 2>&1 &

# Poll the version endpoint until the service responds (30 tries x 2 s ≈ 60 s).
# --max-time bounds each probe so a hung connection cannot stall an iteration.
echo "Waiting for Ollama service to start..."
for i in {1..30}; do
  if curl -s --max-time 2 "$OLLAMA_VERSION_URL" > /dev/null; then
    echo "Ollama service is up!"
    break
  fi
  if (( i == 30 )); then
    echo "Timeout waiting for Ollama service"
    cat "$OLLAMA_LOG"
    exit 1
  fi
  echo "Waiting... ($i/30)"
  sleep 2
done

# The model weights must have been baked into the image at build time.
echo "Checking model file..."
if [[ ! -f "$MODEL_FILE" ]]; then
  echo "Error: Model file not found!"
  exit 1
fi

# Register the model only if Ollama does not already know about it.
echo "Checking for llama3-zh model..."
if ! ollama list | grep -q "$MODEL_NAME"; then
  echo "Creating llama3-zh model..."
  # Test the command directly instead of inspecting $? afterwards.
  if ! ollama create "$MODEL_NAME" -f "$MODELFILE"; then
    echo "Failed to create model"
    cat "$OLLAMA_LOG"
    exit 1
  fi
  echo "Model created successfully!"
fi

# exec replaces this shell: the app becomes the container's main process.
echo "Starting Gradio application..."
exec python app.py