diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index 9e1f9834e..11cb06f46 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -431,6 +431,51 @@ kimik2.5-fp4-mi355x-vllm: - { tp: 8, conc-start: 4, conc-end: 64 } - { tp: 4, conc-start: 4, conc-end: 64 } +kimik2.5-fp4-mi355x-vllm-new: + image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 + model: amd/Kimi-K2.5-MXFP4 + model-prefix: kimik2.5 + runner: mi355x + precision: fp4 + framework: vllm + multinode: false + seq-len-configs: + - isl: 1024 + osl: 1024 + search-space: + - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 4, conc-start: 4, conc-end: 64 } + - isl: 8192 + osl: 1024 + search-space: + - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 4, conc-start: 4, conc-end: 64 } + +kimik2.5-fp4-mi355x-vllm-eagle3: + image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 + model: amd/Kimi-K2.5-MXFP4 + model-prefix: kimik2.5 + runner: mi355x + precision: fp4 + framework: vllm + multinode: false + seq-len-configs: + - isl: 1024 + osl: 1024 + search-space: + - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - isl: 1024 + osl: 8192 + search-space: + - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - isl: 8192 + osl: 1024 + search-space: + - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + kimik2.5-fp4-mi355x-atom: image: rocm/atom:rocm7.2.1-ubuntu24.04-pytorch2.9.1-atom0.1.2 model: amd/Kimi-K2.5-MXFP4 diff --git a/.github/workflows/README.md b/.github/workflows/README.md index de0a3dcab..1a83d6b69 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -235,7 +235,7 @@ The corresponding `SingleNodeMatrixEntry` enforces these same fields with 
approp 2. **`extra='forbid'`**: Unknown fields are rejected, preventing typos or deprecated fields from slipping through. -3. **Strict typing**: Fields like `spec-decoding` use `Literal["mtp", "draft_model", "none"]` to restrict values to known options. +3. **Strict typing**: Fields like `spec-decoding` use a `Literal` type to restrict values to a fixed set of known options (see `utils/matrix_logic/validation.py` for the current set). 4. **Concurrency validation**: The system ensures either `conc-list` OR `conc-start`/`conc-end` is provided, but not both. diff --git a/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh new file mode 100755 index 000000000..1ceef720e --- /dev/null +++ b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash + +# Kimi-K2.5 MXFP4 + Eagle3 speculative decoding on MI355X (vLLM). +# Adds `--speculative-config` on top of the plain kimik2.5_fp4_mi355x.sh flow. +# +# Draft model: lightseekorg/kimi-k2.5-eagle3 (~6 GB BF16, Eagle3 MTP head) +# Spec tokens: 7 (reproduced baseline: 764.1 +/- 35.7 tok/s/gpu @ TP=4, 1k1k, conc=64) +# Draft TP: 1 (draft runs on a single GPU; target occupies $TP) + +source "$(dirname "$0")/../benchmark_lib.sh" + +check_env_vars \ + MODEL \ + TP \ + CONC \ + ISL \ + OSL \ + MAX_MODEL_LEN \ + RANDOM_RANGE_RATIO \ + RESULT_FILENAME + +if [[ -n "$SLURM_JOB_ID" ]]; then + echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME" +fi + +# Draft model (Eagle3 head). Override via SPEC_DRAFT_MODEL if needed. 
+SPEC_DRAFT_MODEL="${SPEC_DRAFT_MODEL:-lightseekorg/kimi-k2.5-eagle3}" +SPEC_NUM_TOKENS="${SPEC_NUM_TOKENS:-7}" +SPEC_DRAFT_TP="${SPEC_DRAFT_TP:-1}" + +hf download "$MODEL" +hf download "$SPEC_DRAFT_MODEL" + +# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+ +if [ -n "$ROCR_VISIBLE_DEVICES" ]; then + export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES" +fi + +SERVER_LOG=/workspace/server.log +PORT=${PORT:-8888} + +if [ "${EVAL_ONLY}" = "true" ]; then + setup_eval_context + MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN" +fi + +# If the machine runs a MEC FW older than 177, RCCL +# cannot reclaim some memory. +# Disable that feature to avoid crashes. +# This is related to the changes in the driver at: +# https://rocm.docs.amd.com/en/docs-6.4.3/about/release-notes.html#amdgpu-driver-updates +version=`rocm-smi --showfw | grep MEC | head -n 1 | awk '{print $NF}'` +if [[ "$version" == "" || $version -lt 177 ]]; then + export HSA_NO_SCRATCH_RECLAIM=1 +fi + +export VLLM_ROCM_USE_AITER=1 +export VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=INT4 + +# Disable AITER RMSNorm for TP < 8 due to accuracy issues +if [ "${TP}" -lt 8 ]; then + export VLLM_ROCM_USE_AITER_RMSNORM=0 +fi + +if [ "${EP_SIZE:-0}" -gt 1 ]; then + EP=" --enable-expert-parallel" +else + EP=" " +fi + +SPEC_CONFIG="{\"model\":\"${SPEC_DRAFT_MODEL}\",\"method\":\"eagle3\",\"num_speculative_tokens\":${SPEC_NUM_TOKENS},\"draft_tensor_parallel_size\":${SPEC_DRAFT_TP}}" + +# Start GPU monitoring (power, temperature, clocks every second) +start_gpu_monitor + +set -x +vllm serve $MODEL --port $PORT \ +--tensor-parallel-size=$TP \ +$EP \ +--gpu-memory-utilization 0.90 \ +--max-model-len $MAX_MODEL_LEN \ +--no-enable-prefix-caching \ +--trust-remote-code \ +--mm-encoder-tp-mode data \ +--speculative-config "$SPEC_CONFIG" > $SERVER_LOG 2>&1 & + +SERVER_PID=$! 
+ +# Wait for server to be ready +wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID" + +run_benchmark_serving \ + --model "$MODEL" \ + --port "$PORT" \ + --backend vllm \ + --input-len "$ISL" \ + --output-len "$OSL" \ + --random-range-ratio "$RANDOM_RANGE_RATIO" \ + --num-prompts "$((CONC * 10))" \ + --max-concurrency "$CONC" \ + --result-filename "$RESULT_FILENAME" \ + --result-dir /workspace/ \ + --use-chat-template \ + --trust-remote-code + +# After throughput, run evaluation only if RUN_EVAL is true +if [ "${RUN_EVAL}" = "true" ]; then + run_eval --framework lm-eval --port "$PORT" + append_lm_eval_summary +fi + +# Stop GPU monitoring +stop_gpu_monitor +set +x diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 4a539e2c0..3ae04d554 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1702,3 +1702,12 @@ description: - "Add VLLM_FLOAT32_MATMUL_PRECISION=high" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1069 + +- config-keys: + - kimik2.5-fp4-mi355x-vllm-eagle3 + description: + - "Add Kimi-K2.5 FP4 vLLM Eagle3 speculative decoding config for MI355X" + - "Image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48" + - "Model: amd/Kimi-K2.5-MXFP4" + - "Draft model: lightseekorg/kimi-k2.5-eagle3 (num_speculative_tokens=7, draft_tp=1)" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1116 diff --git a/runners/launch_mi355x-amds.sh b/runners/launch_mi355x-amds.sh index 5e3225b81..859dc60f9 100644 --- a/runners/launch_mi355x-amds.sh +++ b/runners/launch_mi355x-amds.sh @@ -179,7 +179,11 @@ else export PORT_OFFSET=${RUNNER_NAME: -1} export PORT=$(( 8888 + ${PORT_OFFSET} )) FRAMEWORK_SUFFIX=$([[ "$FRAMEWORK" == "atom" ]] && printf '_atom' || printf '') - SPEC_SUFFIX=$([[ "$SPEC_DECODING" == "mtp" ]] && printf '_mtp' || printf '') + case "$SPEC_DECODING" in + mtp) SPEC_SUFFIX='_mtp' ;; + eagle3) SPEC_SUFFIX='_eagle3' ;; + *) SPEC_SUFFIX='' ;; + esac PARTITION="compute" 
SQUASH_FILE="/var/lib/squash/$(echo "$IMAGE" | sed 's/[\/:@#]/_/g').sqsh" diff --git a/utils/matrix_logic/test_validation.py b/utils/matrix_logic/test_validation.py index 0f1f44c27..636875e39 100644 --- a/utils/matrix_logic/test_validation.py +++ b/utils/matrix_logic/test_validation.py @@ -275,7 +275,7 @@ def test_conc_as_list(self, valid_single_node_matrix_entry): def test_spec_decoding_values(self, valid_single_node_matrix_entry): """Spec decoding should accept valid literal values.""" - for value in ["mtp", "draft_model", "none"]: + for value in ["mtp", "draft_model", "eagle3", "none"]: valid_single_node_matrix_entry["spec-decoding"] = value entry = SingleNodeMatrixEntry(**valid_single_node_matrix_entry) assert entry.spec_decoding == value diff --git a/utils/matrix_logic/validation.py b/utils/matrix_logic/validation.py index ce10840b5..bdf7a8494 100644 --- a/utils/matrix_logic/validation.py +++ b/utils/matrix_logic/validation.py @@ -77,7 +77,7 @@ class SingleNodeMatrixEntry(BaseModel): model_prefix: str = Field(alias=Fields.MODEL_PREFIX.value) precision: str framework: str - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( alias=Fields.SPEC_DECODING.value ) runner: str @@ -116,7 +116,7 @@ class MultiNodeMatrixEntry(BaseModel): model_prefix: str = Field(alias=Fields.MODEL_PREFIX.value) precision: str framework: str - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( alias=Fields.SPEC_DECODING.value ) runner: str @@ -204,7 +204,7 @@ class SingleNodeSearchSpaceEntry(BaseModel): tp: int ep: Optional[int] = None - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( default="none", alias=Fields.SPEC_DECODING.value) dp_attn: Optional[bool] = Field( default=None, alias=Fields.DP_ATTN.value) @@ -224,7 +224,7 @@ class 
MultiNodeSearchSpaceEntry(BaseModel): """Multinode search space configuration.""" model_config = ConfigDict(extra='forbid', populate_by_name=True) - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( default="none", alias=Fields.SPEC_DECODING.value) prefill: WorkerConfig decode: WorkerConfig