Spaces:
Paused
Paused
File size: 2,040 Bytes
69c6372 490e6a3 69c6372 490e6a3 57f9fa5 148829b 57f9fa5 490e6a3 69c6372 dc19c1d 490e6a3 dc19c1d 69c6372 dc19c1d 69c6372 dc19c1d 69c6372 5bd7bc7 8c5a84b c0cde8e 1530e6e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
#!/bin/bash
# Entrypoint: select a model by MODEL_ID and launch the vLLM
# OpenAI-compatible API server on port 7860.
set -euo pipefail

# Validate MODEL_ID — nothing downstream can run without it.
# ${MODEL_ID:-} keeps the check safe under `set -u` when unset.
if [[ -z "${MODEL_ID:-}" ]]; then
  echo "Error: MODEL_ID is not set." >&2
  exit 1
fi
# Assign MODEL_NAME (model repo) and MODEL_REV (pinned commit hash)
# based on MODEL_ID, so deployments are reproducible across restarts.
case "$MODEL_ID" in
1)
MODEL_NAME="meta-llama/Llama-3.2-3B-Instruct"
MODEL_REV="0cb88a4f764b7a12671c53f0838cd831a0843b95"
;;
2)
MODEL_NAME="sail/Sailor-4B-Chat"
MODEL_REV="89a866a7041e6ec023dd462adeca8e28dd53c83e"
;;
3)
# NOTE(review): no org prefix here, unlike case 4's "deepseek-ai/…" —
# confirm this resolves as intended (Hub repo vs. local path).
MODEL_NAME="DeepSeek-R1-Distill-Qwen-32B"
MODEL_REV="d66bcfc2f3fd52799f95943264f32ba15ca0003d"
;;
4)
MODEL_NAME="deepseek-ai/DeepSeek-V3"
MODEL_REV="1d044fd82b15f1cedb197a288e50cc96a2c27205"
;;
*)
# Fix: message previously claimed "1 or 2" while IDs 1-4 are accepted.
echo "Error: Invalid MODEL_ID. Valid values are 1, 2, 3, or 4." >&2
exit 1
;;
esac
# Announce the chosen model. "$MODEL_NAME" is quoted so the value is
# passed as a single printf argument (no word splitting/globbing, SC2086).
printf "Running %s using vLLM OpenAI compatible API Server at port %s\n" "$MODEL_NAME" "7860"
# https://medium.com/geekculture/the-story-behind-random-seed-42-in-machine-learning-b838c4ac290a
#[Seven and a half million years later…. Fook and Lunkwill are long gone, but their descendants continue what they started]
# “All right,” said Deep Thought. “The Answer to the Great Question…”
# “Yes..!”
# “Of Life, the Universe and Everything…” said Deep Thought.
# “Yes…!”
# “Is…” said Deep Thought, and paused.
# “Yes…!”
# “Is…”
# “Yes…!!!…?”
# “Forty-two,” said Deep Thought, with infinite majesty and calm.
# ―Douglas Adams, The Hitchhiker’s Guide to the Galaxy
# Run the Python script with the determined values
# Supported tasks: {'generate', 'embedding'}
# Flag notes (comments must sit above the command — inline comments
# would break the backslash line continuations):
#   --revision/--code-revision/--tokenizer-revision all use the same
#     pinned commit so weights, model code, and tokenizer stay in sync.
#   --seed 42 fixes the RNG seed for reproducible sampling (see the
#     article linked above for the "42" in-joke).
#   --max-num-batched-tokens / --max-model-len both cap at 32768 tokens.
#   --enforce-eager: presumably chosen to reduce startup/memory cost of
#     CUDA graph capture — NOTE(review): confirm against vLLM docs.
#   --trust-remote-code lets the model repo execute its own code;
#     acceptable here only because revisions are pinned above.
python -u /app/openai_compatible_api_server.py \
--model "${MODEL_NAME}" \
--task generate \
--revision "${MODEL_REV}" \
--code-revision "${MODEL_REV}" \
--tokenizer-revision "${MODEL_REV}" \
--seed 42 \
--host 0.0.0.0 \
--port 7860 \
--max-num-batched-tokens 32768 \
--max-model-len 32768 \
--dtype float16 \
--enforce-eager \
--gpu-memory-utilization 0.9 \
--enable-prefix-caching \
--disable-log-requests \
--trust-remote-code
|