From 783479967ee295ee2261d189a5e7771452819968 Mon Sep 17 00:00:00 2001 From: Chun Fang Date: Wed, 22 Apr 2026 20:11:06 +0000 Subject: [PATCH 1/7] Add Kimi-K2.5 FP4 + Eagle3 speculative decoding config for MI355X - Co-authored with Li, Larry and Li, Chao Changes: - amd-master.yaml: add kimik2.5-fp4-mi355x-vllm-eagle3 entry sweeping tp={4,8} x conc={4..64} on both 1k1k and 8k1k. Uses the vllm-rocm nightly required for Eagle3 support. - benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh: new benchmark script that reuses the kimik2.5_fp4_mi355x.sh recipe and adds --speculative-config for the Eagle3 draft (lightseekorg/kimi-k2.5-eagle3, num_speculative_tokens=7, draft_tensor_parallel_size=1). --block-size is intentionally omitted: the Eagle3 draft uses standard attention on ROCm and no ROCm standard-attention backend supports block_size=1. - utils/matrix_logic/validation.py (+ test_validation.py): extend the spec-decoding Literal from {mtp, draft_model, none} to include eagle3. - runners/launch_mi355x-amds.sh: extend SPEC_SUFFIX so SPEC_DECODING=eagle3 resolves to the _eagle3 benchmark script variant. 
--- .github/configs/amd-master.yaml | 20 +++ .../single_node/kimik2.5_fp4_mi355x_eagle3.sh | 126 ++++++++++++++++++ runners/launch_mi355x-amds.sh | 6 +- utils/matrix_logic/test_validation.py | 2 +- utils/matrix_logic/validation.py | 8 +- 5 files changed, 156 insertions(+), 6 deletions(-) create mode 100755 benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index 9e1f9834e..b7766de3a 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -431,6 +431,26 @@ kimik2.5-fp4-mi355x-vllm: - { tp: 8, conc-start: 4, conc-end: 64 } - { tp: 4, conc-start: 4, conc-end: 64 } +kimik2.5-fp4-mi355x-vllm-eagle3: + image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 + model: amd/Kimi-K2.5-MXFP4 + model-prefix: kimik2.5 + runner: mi355x + precision: fp4 + framework: vllm + multinode: false + seq-len-configs: + - isl: 1024 + osl: 1024 + search-space: + - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - isl: 8192 + osl: 1024 + search-space: + - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + kimik2.5-fp4-mi355x-atom: image: rocm/atom:rocm7.2.1-ubuntu24.04-pytorch2.9.1-atom0.1.2 model: amd/Kimi-K2.5-MXFP4 diff --git a/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh new file mode 100755 index 000000000..6be15d8ba --- /dev/null +++ b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash + +# Kimi-K2.5 MXFP4 + Eagle3 speculative decoding on MI355X (vLLM). +# Adds `--speculative-config` on top of the plain kimik2.5_fp4_mi355x.sh flow. 
+# +# Draft model: lightseekorg/kimi-k2.5-eagle3 (~6 GB BF16, Eagle3 MTP head) +# Spec tokens: 7 (reproduced baseline: 764.1 +/- 35.7 tok/s/gpu @ TP=4, 1k1k, conc=64) +# Draft TP: 1 (draft runs on a single GPU; target occupies $TP) + +source "$(dirname "$0")/../benchmark_lib.sh" + +check_env_vars \ + MODEL \ + TP \ + CONC \ + ISL \ + OSL \ + MAX_MODEL_LEN \ + RANDOM_RANGE_RATIO \ + RESULT_FILENAME + +if [[ -n "$SLURM_JOB_ID" ]]; then + echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME" +fi + +# Draft model (Eagle3 head). Override via SPEC_DRAFT_MODEL if needed. +SPEC_DRAFT_MODEL="${SPEC_DRAFT_MODEL:-lightseekorg/kimi-k2.5-eagle3}" +SPEC_NUM_TOKENS="${SPEC_NUM_TOKENS:-7}" +SPEC_DRAFT_TP="${SPEC_DRAFT_TP:-1}" + +hf download "$MODEL" +hf download "$SPEC_DRAFT_MODEL" + +# Install amd-quark for MXFP4 quantization support +# need to manually install due to ROCm vLLM bug +# https://github.com/vllm-project/vllm/issues/35633 +pip install amd-quark + +# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+ +if [ -n "$ROCR_VISIBLE_DEVICES" ]; then + export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES" +fi + +SERVER_LOG=/workspace/server.log +PORT=${PORT:-8888} + +if [ "${EVAL_ONLY}" = "true" ]; then + setup_eval_context + MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN" +fi + +# If the machine runs a MEC FW older than 177, RCCL +# cannot reclaim some memory. +# Disable that feature to avoid crashes. 
+# This is related to the changes in the driver at: +# https://rocm.docs.amd.com/en/docs-6.4.3/about/release-notes.html#amdgpu-driver-updates +version=`rocm-smi --showfw | grep MEC | head -n 1 | awk '{print $NF}'` +if [[ "$version" == "" || $version -lt 177 ]]; then + export HSA_NO_SCRATCH_RECLAIM=1 +fi + +export VLLM_ROCM_USE_AITER=1 +export VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=INT4 + +# Disable AITER RMSNorm for TP < 8 due to accuracy issues +if [ "${TP}" -lt 8 ]; then + export VLLM_ROCM_USE_AITER_RMSNORM=0 +fi + +if [ "${EP_SIZE:-0}" -gt 1 ]; then + EP=" --enable-expert-parallel" +else + EP=" " +fi + +# Eagle3 speculative config. Single-quoted JSON passed as one arg so spaces/braces +# survive bash word-splitting when expanded into the vllm invocation below. +SPEC_CONFIG="{\"model\":\"${SPEC_DRAFT_MODEL}\",\"method\":\"eagle3\",\"num_speculative_tokens\":${SPEC_NUM_TOKENS},\"draft_tensor_parallel_size\":${SPEC_DRAFT_TP}}" + +# Start GPU monitoring (power, temperature, clocks every second) +start_gpu_monitor + +# NOTE: --block-size is intentionally omitted (unlike the non-spec kimik2.5_fp4_mi355x.sh +# which sets --block-size=1). The target MLA path (ROCM_AITER_MLA) accepts block_size=1, +# but the Eagle3 draft is a Llama model using standard attention and no ROCm standard- +# attention backend (ROCM_ATTN, ROCM_AITER_FA, ROCM_AITER_UNIFIED_ATTN, TRITON_ATTN) +# supports block_size=1. Letting vLLM pick the default matches the proven +# tmp_scripts/start_server.sh recipe that reproduced 764.1 +/- 35.7 tok/s/gpu. +set -x +vllm serve $MODEL --port $PORT \ +--tensor-parallel-size=$TP \ +$EP \ +--gpu-memory-utilization 0.90 \ +--max-model-len $MAX_MODEL_LEN \ +--no-enable-prefix-caching \ +--trust-remote-code \ +--mm-encoder-tp-mode data \ +--speculative-config "$SPEC_CONFIG" > $SERVER_LOG 2>&1 & + +SERVER_PID=$! 
+ +# Wait for server to be ready +wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID" + +run_benchmark_serving \ + --model "$MODEL" \ + --port "$PORT" \ + --backend vllm \ + --input-len "$ISL" \ + --output-len "$OSL" \ + --random-range-ratio "$RANDOM_RANGE_RATIO" \ + --num-prompts "$((CONC * 10))" \ + --max-concurrency "$CONC" \ + --result-filename "$RESULT_FILENAME" \ + --result-dir /workspace/ \ + --trust-remote-code + +# After throughput, run evaluation only if RUN_EVAL is true +if [ "${RUN_EVAL}" = "true" ]; then + run_eval --framework lm-eval --port "$PORT" + append_lm_eval_summary +fi + +# Stop GPU monitoring +stop_gpu_monitor +set +x diff --git a/runners/launch_mi355x-amds.sh b/runners/launch_mi355x-amds.sh index 5e3225b81..859dc60f9 100644 --- a/runners/launch_mi355x-amds.sh +++ b/runners/launch_mi355x-amds.sh @@ -179,7 +179,11 @@ else export PORT_OFFSET=${RUNNER_NAME: -1} export PORT=$(( 8888 + ${PORT_OFFSET} )) FRAMEWORK_SUFFIX=$([[ "$FRAMEWORK" == "atom" ]] && printf '_atom' || printf '') - SPEC_SUFFIX=$([[ "$SPEC_DECODING" == "mtp" ]] && printf '_mtp' || printf '') + case "$SPEC_DECODING" in + mtp) SPEC_SUFFIX='_mtp' ;; + eagle3) SPEC_SUFFIX='_eagle3' ;; + *) SPEC_SUFFIX='' ;; + esac PARTITION="compute" SQUASH_FILE="/var/lib/squash/$(echo "$IMAGE" | sed 's/[\/:@#]/_/g').sqsh" diff --git a/utils/matrix_logic/test_validation.py b/utils/matrix_logic/test_validation.py index 0f1f44c27..636875e39 100644 --- a/utils/matrix_logic/test_validation.py +++ b/utils/matrix_logic/test_validation.py @@ -275,7 +275,7 @@ def test_conc_as_list(self, valid_single_node_matrix_entry): def test_spec_decoding_values(self, valid_single_node_matrix_entry): """Spec decoding should accept valid literal values.""" - for value in ["mtp", "draft_model", "none"]: + for value in ["mtp", "draft_model", "eagle3", "none"]: valid_single_node_matrix_entry["spec-decoding"] = value entry = SingleNodeMatrixEntry(**valid_single_node_matrix_entry) assert 
entry.spec_decoding == value diff --git a/utils/matrix_logic/validation.py b/utils/matrix_logic/validation.py index ce10840b5..bdf7a8494 100644 --- a/utils/matrix_logic/validation.py +++ b/utils/matrix_logic/validation.py @@ -77,7 +77,7 @@ class SingleNodeMatrixEntry(BaseModel): model_prefix: str = Field(alias=Fields.MODEL_PREFIX.value) precision: str framework: str - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( alias=Fields.SPEC_DECODING.value ) runner: str @@ -116,7 +116,7 @@ class MultiNodeMatrixEntry(BaseModel): model_prefix: str = Field(alias=Fields.MODEL_PREFIX.value) precision: str framework: str - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( alias=Fields.SPEC_DECODING.value ) runner: str @@ -204,7 +204,7 @@ class SingleNodeSearchSpaceEntry(BaseModel): tp: int ep: Optional[int] = None - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( default="none", alias=Fields.SPEC_DECODING.value) dp_attn: Optional[bool] = Field( default=None, alias=Fields.DP_ATTN.value) @@ -224,7 +224,7 @@ class MultiNodeSearchSpaceEntry(BaseModel): """Multinode search space configuration.""" model_config = ConfigDict(extra='forbid', populate_by_name=True) - spec_decoding: Literal["mtp", "draft_model", "none"] = Field( + spec_decoding: Literal["mtp", "draft_model", "eagle3", "none"] = Field( default="none", alias=Fields.SPEC_DECODING.value) prefill: WorkerConfig decode: WorkerConfig From 1c51d2ea17cd7098db000ee0cd3faa3f3a0d6416 Mon Sep 17 00:00:00 2001 From: Chun Fang Date: Wed, 22 Apr 2026 20:21:01 +0000 Subject: [PATCH 2/7] Update perf-changelog.yaml --- perf-changelog.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 4a539e2c0..3ae04d554 100644 --- 
a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1702,3 +1702,12 @@ description: - "Add VLLM_FLOAT32_MATMUL_PRECISION=high" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1069 + +- config-keys: + - kimik2.5-fp4-mi355x-vllm-eagle3 + description: + - "Add Kimi-K2.5 FP4 vLLM Eagle3 speculative decoding config for MI355X" + - "Image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48" + - "Model: amd/Kimi-K2.5-MXFP4" + - "Draft model: lightseekorg/kimi-k2.5-eagle3 (num_speculative_tokens=7, draft_tp=1)" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1116 From e605f96addaf222b4ceb83059bf4bc8926e89243 Mon Sep 17 00:00:00 2001 From: Chun Fang Date: Wed, 22 Apr 2026 20:36:14 +0000 Subject: [PATCH 3/7] Address Claude PR review on kimik2.5-fp4-mi355x-vllm-eagle3 - benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh: add --use-chat-template to run_benchmark_serving. Eagle3 is EAGLE-family speculative decoding and the Eagle3 draft is trained on chat-formatted inputs; AGENTS.md requires this flag for all *_mtp.sh scripts and every peer script in the repo passes it. Without it, acceptance rate and throughput are silently distorted. - benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh: fix the SPEC_CONFIG comment to accurately describe the double-quoted bash string mechanism (was mis-described as single-quoted). - .github/workflows/README.md: rephrase the spec-decoding Literal example to point at utils/matrix_logic/validation.py as the source of truth, instead of enumerating values that drift whenever the Literal changes. Addresses review comments on PR #1116. 
Made-with: Cursor --- .github/workflows/README.md | 2 +- benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh | 9 +-------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/README.md b/.github/workflows/README.md index de0a3dcab..1a83d6b69 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -235,7 +235,7 @@ The corresponding `SingleNodeMatrixEntry` enforces these same fields with approp 2. **`extra='forbid'`**: Unknown fields are rejected, preventing typos or deprecated fields from slipping through. -3. **Strict typing**: Fields like `spec-decoding` use `Literal["mtp", "draft_model", "none"]` to restrict values to known options. +3. **Strict typing**: Fields like `spec-decoding` use a `Literal` type to restrict values to a fixed set of known options (see `utils/matrix_logic/validation.py` for the current set). 4. **Concurrency validation**: The system ensures either `conc-list` OR `conc-start`/`conc-end` is provided, but not both. diff --git a/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh index 6be15d8ba..618c7c830 100755 --- a/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh +++ b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh @@ -73,19 +73,11 @@ else EP=" " fi -# Eagle3 speculative config. Single-quoted JSON passed as one arg so spaces/braces -# survive bash word-splitting when expanded into the vllm invocation below. SPEC_CONFIG="{\"model\":\"${SPEC_DRAFT_MODEL}\",\"method\":\"eagle3\",\"num_speculative_tokens\":${SPEC_NUM_TOKENS},\"draft_tensor_parallel_size\":${SPEC_DRAFT_TP}}" # Start GPU monitoring (power, temperature, clocks every second) start_gpu_monitor -# NOTE: --block-size is intentionally omitted (unlike the non-spec kimik2.5_fp4_mi355x.sh -# which sets --block-size=1). 
The target MLA path (ROCM_AITER_MLA) accepts block_size=1, -# but the Eagle3 draft is a Llama model using standard attention and no ROCm standard- -# attention backend (ROCM_ATTN, ROCM_AITER_FA, ROCM_AITER_UNIFIED_ATTN, TRITON_ATTN) -# supports block_size=1. Letting vLLM pick the default matches the proven -# tmp_scripts/start_server.sh recipe that reproduced 764.1 +/- 35.7 tok/s/gpu. set -x vllm serve $MODEL --port $PORT \ --tensor-parallel-size=$TP \ @@ -113,6 +105,7 @@ run_benchmark_serving \ --max-concurrency "$CONC" \ --result-filename "$RESULT_FILENAME" \ --result-dir /workspace/ \ + --use-chat-template \ --trust-remote-code # After throughput, run evaluation only if RUN_EVAL is true From a3b35a3e412ab80f2af01752925da982c46a2e5e Mon Sep 17 00:00:00 2001 From: Chun Fang Date: Wed, 22 Apr 2026 21:04:19 +0000 Subject: [PATCH 4/7] Drop redundant pip install amd-quark from Eagle3 benchmark script --- benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh index 618c7c830..1ceef720e 100755 --- a/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh +++ b/benchmarks/single_node/kimik2.5_fp4_mi355x_eagle3.sh @@ -31,11 +31,6 @@ SPEC_DRAFT_TP="${SPEC_DRAFT_TP:-1}" hf download "$MODEL" hf download "$SPEC_DRAFT_MODEL" -# Install amd-quark for MXFP4 quantization support -# need to manually install due to ROCm vLLM bug -# https://github.com/vllm-project/vllm/issues/35633 -pip install amd-quark - # Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+ if [ -n "$ROCR_VISIBLE_DEVICES" ]; then export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES" From c06b5e9d06496ac0bbada63fcbc1ed2dd4ff0d98 Mon Sep 17 00:00:00 2001 From: larryli2-amd Date: Mon, 27 Apr 2026 14:50:21 +0800 Subject: [PATCH 5/7] Update amd-master.yaml Add Kimi-K2.5 eagle3 ISL/OSL=1K/8K experiments --- 
.github/configs/amd-master.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index b7766de3a..00d45515c 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -445,6 +445,11 @@ kimik2.5-fp4-mi355x-vllm-eagle3: search-space: - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - isl: 1024 + osl: 8192 + search-space: + - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } + - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: eagle3 } - isl: 8192 osl: 1024 search-space: From 09ea693158a6c13b85053c692fddec9409261a4f Mon Sep 17 00:00:00 2001 From: larryli2-amd Date: Tue, 28 Apr 2026 14:37:06 +0800 Subject: [PATCH 6/7] Update amd-master.yaml Add Kimi-K2.5 FP4 for docker image vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 --- .github/configs/amd-master.yaml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index 00d45515c..0e7bcca12 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -431,6 +431,26 @@ kimik2.5-fp4-mi355x-vllm: - { tp: 8, conc-start: 4, conc-end: 64 } - { tp: 4, conc-start: 4, conc-end: 64 } +kimik2.5-fp4-mi355x-vllm: + image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 + model: amd/Kimi-K2.5-MXFP4 + model-prefix: kimik2.5 + runner: mi355x + precision: fp4 + framework: vllm + multinode: false + seq-len-configs: + - isl: 1024 + osl: 1024 + search-space: + - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 4, conc-start: 4, conc-end: 64 } + - isl: 8192 + osl: 1024 + search-space: + - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 4, conc-start: 4, conc-end: 64 } + kimik2.5-fp4-mi355x-vllm-eagle3: image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 model: 
amd/Kimi-K2.5-MXFP4 From 4a8f4351890491f21005dc1f2ac5631aa9f81040 Mon Sep 17 00:00:00 2001 From: larryli2-amd Date: Tue, 28 Apr 2026 14:38:04 +0800 Subject: [PATCH 7/7] Update amd-master.yaml --- .github/configs/amd-master.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index 0e7bcca12..11cb06f46 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -431,7 +431,7 @@ kimik2.5-fp4-mi355x-vllm: - { tp: 8, conc-start: 4, conc-end: 64 } - { tp: 4, conc-start: 4, conc-end: 64 } -kimik2.5-fp4-mi355x-vllm: +kimik2.5-fp4-mi355x-vllm-new: image: vllm/vllm-openai-rocm:nightly-4eafc729285e459a5fc96efd6f7b313b155cad48 model: amd/Kimi-K2.5-MXFP4 model-prefix: kimik2.5