[Recipes][LLM PTQ] Add nvfp4 MSE+FP8-cast-KV recipes (experts_only / mlp_only) + --recipe in example scripts #1407
`examples/llm_ptq/scripts/huggingface_example.sh`

```diff
@@ -49,18 +49,7 @@ dense | sparsegpt) ;;
         ;;
 esac
 
-#Iterate over list of qformats provided and check if they are valid
-IFS=","
-for qformat in $QFORMAT; do
-    case $qformat in
-        fp8 | fp8_pc_pt | fp8_pb_wo | int8_wo | int8_sq | int4_awq | w4a8_awq | fp16 | bf16 | nvfp4 | nvfp4_awq | nvfp4_mse | w4a8_nvfp4_fp8 | w4a8_mxfp4_fp8 | nvfp4_experts_only | nvfp4_mlp_only | nvfp4_omlp_only | nvfp4_svdquant | mxfp8 | nvfp4_local_hessian) ;;
-        *)
-            echo "Unknown quant argument: Expected one of: [fp8, fp8_pc_pt, fp8_pb_wo, int8_wo, int8_sq, int4_awq, w4a8_awq, fp16, bf16, nvfp4, nvfp4_awq, nvfp4_mse, w4a8_nvfp4_fp8, w4a8_mxfp4_fp8, nvfp4_experts_only, nvfp4_mlp_only, nvfp4_omlp_only, nvfp4_svdquant, mxfp8, nvfp4_local_hessian]" >&2
-            exit 1
-            ;;
-    esac
-done
-IFS=" "
+# Quant format / recipe validation is delegated to hf_ptq.py.
 
 script_dir="$(dirname "$(readlink -f "$0")")"
```
```diff
@@ -72,7 +61,14 @@ fi
 QFORMAT_MODIFIED="${QFORMAT//,/_}"
 
-MODEL_NAME=$(basename $MODEL_PATH | sed 's/[^0-9a-zA-Z\-]/_/g')_${QFORMAT_MODIFIED}${KV_CACHE_QUANT:+_kv_${KV_CACHE_QUANT}}
+# When using --recipe, build the model name from the recipe basename (without
+# directory or .yaml suffix) so each recipe gets its own SAVE_PATH.
+if [ -n "$RECIPE" ]; then
+    RECIPE_TAG=$(basename "$RECIPE" .yaml | sed 's/[^0-9a-zA-Z\-]/_/g')
+    MODEL_NAME=$(basename $MODEL_PATH | sed 's/[^0-9a-zA-Z\-]/_/g')_recipe_${RECIPE_TAG}
+else
+    MODEL_NAME=$(basename $MODEL_PATH | sed 's/[^0-9a-zA-Z\-]/_/g')_${QFORMAT_MODIFIED}${KV_CACHE_QUANT:+_kv_${KV_CACHE_QUANT}}
```
Contributor

Quote the expansions and pass `QUANT_SPEC_ARGS` as an array. `basename $MODEL_PATH` is unquoted here (Shellcheck SC2086: double quote to prevent globbing and word splitting), and the scalar `$QUANT_SPEC_ARGS` introduced further down is expanded unquoted in the `python hf_ptq.py` call, so a model or recipe path containing spaces or glob characters breaks both the derived `MODEL_NAME` and the argument list.

Proposed fix:

```diff
 if [ -n "$RECIPE" ]; then
     RECIPE_TAG=$(basename "$RECIPE" .yaml | sed 's/[^0-9a-zA-Z\-]/_/g')
-    MODEL_NAME=$(basename $MODEL_PATH | sed 's/[^0-9a-zA-Z\-]/_/g')_recipe_${RECIPE_TAG}
+    MODEL_NAME=$(basename "$MODEL_PATH" | sed 's/[^0-9a-zA-Z\-]/_/g')_recipe_${RECIPE_TAG}
 else
-    MODEL_NAME=$(basename $MODEL_PATH | sed 's/[^0-9a-zA-Z\-]/_/g')_${QFORMAT_MODIFIED}${KV_CACHE_QUANT:+_kv_${KV_CACHE_QUANT}}
+    MODEL_NAME=$(basename "$MODEL_PATH" | sed 's/[^0-9a-zA-Z\-]/_/g')_${QFORMAT_MODIFIED}${KV_CACHE_QUANT:+_kv_${KV_CACHE_QUANT}}
 fi
@@
-    if [ -n "$RECIPE" ]; then
-        QUANT_SPEC_ARGS="--recipe=$RECIPE"
-    else
-        QUANT_SPEC_ARGS="--qformat=${QFORMAT// /,}"
-    fi
+    if [ -n "$RECIPE" ]; then
+        QUANT_SPEC_ARGS=(--recipe="$RECIPE")
+    else
+        QUANT_SPEC_ARGS=(--qformat="${QFORMAT// /,}")
+    fi
     python hf_ptq.py \
         --pyt_ckpt_path=$MODEL_PATH \
         --export_path=$SAVE_PATH \
         --sparsity_fmt=$SPARSITY_FMT \
-        $QUANT_SPEC_ARGS \
+        "${QUANT_SPEC_ARGS[@]}" \
         --calib_size=$CALIB_SIZE \
```

Also applies to lines 177–185.
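To make the hazard concrete, here is a minimal sketch of the word-splitting behavior the comment describes (the recipe path is hypothetical, not from this PR):

```bash
#!/usr/bin/env bash
# Hypothetical recipe path containing a space.
RECIPE="/tmp/my recipes/nvfp4_mlp_only.yaml"

# Scalar form: the unquoted expansion splits on the space, so the callee
# receives two arguments: "--recipe=/tmp/my" and "recipes/nvfp4_mlp_only.yaml".
QUANT_SPEC_ARGS="--recipe=$RECIPE"
printf '<%s>\n' $QUANT_SPEC_ARGS

# Array form: expands to exactly one word per element, spaces preserved.
QUANT_SPEC_ARGS=(--recipe="$RECIPE")
printf '<%s>\n' "${QUANT_SPEC_ARGS[@]}"
```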
```diff
+fi
 
 SAVE_PATH=${ROOT_SAVE_PATH}/saved_models_${MODEL_NAME}
```
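To illustrate the naming scheme, this sketch shows what the two branches produce for a hypothetical model and recipe (paths and QFORMAT values are illustrative only):

```bash
# Hypothetical inputs.
MODEL_PATH=/models/Llama-3.1-8B-Instruct
RECIPE=recipes/nvfp4_mlp_only.yaml

# Recipe branch: prints saved_models_Llama-3_1-8B-Instruct_recipe_nvfp4_mlp_only
RECIPE_TAG=$(basename "$RECIPE" .yaml | sed 's/[^0-9a-zA-Z\-]/_/g')
echo "saved_models_$(basename "$MODEL_PATH" | sed 's/[^0-9a-zA-Z\-]/_/g')_recipe_${RECIPE_TAG}"

# qformat branch (with QFORMAT=nvfp4, KV_CACHE_QUANT=fp8) would yield:
# saved_models_Llama-3_1-8B-Instruct_nvfp4_kv_fp8
```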
```diff
@@ -177,11 +173,16 @@ if [[ $TASKS =~ "quant" ]] || [[ ! -d "$SAVE_PATH" ]] || [[ ! $(ls -A $SAVE_PATH
 if [[ "$MODEL_CONFIG_EXIST" == false ]]; then
     echo "Quantizing original model..."
+    if [ -n "$RECIPE" ]; then
+        QUANT_SPEC_ARGS="--recipe=$RECIPE"
+    else
+        QUANT_SPEC_ARGS="--qformat=${QFORMAT// /,}"
+    fi
     python hf_ptq.py \
         --pyt_ckpt_path=$MODEL_PATH \
         --export_path=$SAVE_PATH \
         --sparsity_fmt=$SPARSITY_FMT \
-        --qformat="${QFORMAT// /,}" \
+        $QUANT_SPEC_ARGS \
         --calib_size=$CALIB_SIZE \
         --batch_size=$CALIB_BATCH_SIZE \
         --inference_tensor_parallel=$TP \
```
```diff
@@ -203,7 +204,7 @@ if [[ $TASKS =~ "quant" ]] || [[ ! -d "$SAVE_PATH" ]] || [[ ! $(ls -A $SAVE_PATH
     exit 0
 fi
 
-if [[ "$QFORMAT" == *"nvfp4"* ]] || [[ "$KV_CACHE_QUANT" == *"nvfp4"* ]]; then
+if [[ "$QFORMAT" == *"nvfp4"* ]] || [[ "$KV_CACHE_QUANT" == *"nvfp4"* ]] || [[ "$RECIPE" == *"nvfp4"* ]]; then
     cuda_major=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader -i 0 | cut -d. -f1)
 
     if [ "$cuda_major" -lt 10 ]; then
```
```diff
@@ -212,6 +213,11 @@ if [[ $TASKS =~ "quant" ]] || [[ ! -d "$SAVE_PATH" ]] || [[ ! $(ls -A $SAVE_PATH
     fi
 fi
 
+if [ -n "$RECIPE" ]; then
+    echo "Recipe $RECIPE used. Please deploy with TensorRT-LLM directly. Checkpoint export_path: $SAVE_PATH"
+    exit 0
+fi
+
 if [[ ! " fp8 nvfp4 bf16 fp16 " =~ " ${QFORMAT} " ]]; then
     echo "Quant $QFORMAT specified. Please read TensorRT-LLM quantization support matrix https://nvidia.github.io/TensorRT-LLM/features/quantization.html#quantization-in-tensorrt-llm and use TensorRT-LLM for deployment. Checkpoint export_path: $SAVE_PATH"
     exit 0
```
New file (+48 lines) — the `nvfp4_experts_only` recipe (the file path is not shown in this view):

```yaml
# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

imports:
  base_disable_all: configs/ptq/units/base_disable_all
  default_disabled_quantizers: configs/ptq/units/default_disabled_quantizers
  nvfp4: configs/numerics/nvfp4
  nvfp4_static: configs/numerics/nvfp4_static
  kv_fp8_cast: configs/ptq/units/kv_fp8_cast

metadata:
  recipe_type: ptq
  description: NVFP4 static weight (MSE FP8-scale sweep) and dynamic activation for expert layers only (W4A4), FP8 KV cache with constant amax.

quantize:
  algorithm:
    method: mse
    fp8_scale_sweep: true
    # layerwise=false required for VLMs where the decoder layers are nested under
    # `model.language_model.layers` (layerwise_calibrate can't find them otherwise).
    layerwise: false
  quant_cfg:
    - $import: base_disable_all
    - quantizer_name: '*mlp.experts*weight_quantizer'
      cfg:
        $import: nvfp4_static
    - quantizer_name: '*mlp.experts*input_quantizer'
      cfg:
        $import: nvfp4
    - quantizer_name: '*block_sparse_moe*weight_quantizer'
      cfg:
        $import: nvfp4_static
    - quantizer_name: '*block_sparse_moe*input_quantizer'
      cfg:
        $import: nvfp4
    - $import: kv_fp8_cast
    - $import: default_disabled_quantizers
```
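For reference, a recipe like this is consumed via the `--recipe` flag that this PR threads through to `hf_ptq.py`. A direct invocation might look roughly like the sketch below; the flags are taken from the `hf_ptq.py` call in the diff above, while the model path, export path, recipe location, and calibration values are hypothetical:

```bash
# Sketch only: flag names come from the diff; all values are placeholders.
python hf_ptq.py \
    --pyt_ckpt_path=/models/Llama-3.1-8B-Instruct \
    --export_path=/ckpts/saved_models_demo \
    --recipe=configs/recipes/nvfp4_experts_only.yaml \
    --calib_size=512 \
    --batch_size=16
```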
New file (+54 lines) — the `nvfp4_mlp_only` recipe (the file path is not shown in this view):

```yaml
# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

imports:
  base_disable_all: configs/ptq/units/base_disable_all
  default_disabled_quantizers: configs/ptq/units/default_disabled_quantizers
  nvfp4: configs/numerics/nvfp4
  nvfp4_static: configs/numerics/nvfp4_static
  kv_fp8_cast: configs/ptq/units/kv_fp8_cast

metadata:
  recipe_type: ptq
  description: NVFP4 static weight (MSE FP8-scale sweep) and dynamic activation for all linear layers (W4A4), FP8 KV cache with constant amax.
```
Contributor

Metadata description overstates the quantization scope. Line 25 says "all linear layers," but this recipe only enables quantizers for MLP/MoE/expert patterns. Please align the text to the actual scope to avoid user confusion.

Proposed fix:

```diff
-  description: NVFP4 static weight (MSE FP8-scale sweep) and dynamic activation for all linear layers (W4A4), FP8 KV cache with constant amax.
+  description: NVFP4 static weight (MSE FP8-scale sweep) and dynamic activation for MLP/MoE linear layers (W4A4), FP8 KV cache with constant amax.
```
```yaml
quantize:
  algorithm:
    method: mse
    fp8_scale_sweep: true
    # layerwise=false required for VLMs where the decoder layers are nested under
    # `model.language_model.layers` (layerwise_calibrate can't find them otherwise).
    layerwise: false
  quant_cfg:
    - $import: base_disable_all
    - quantizer_name: '*mlp*weight_quantizer'
      cfg:
        $import: nvfp4_static
    - quantizer_name: '*mlp*input_quantizer'
      cfg:
        $import: nvfp4
    - quantizer_name: '*block_sparse_moe*weight_quantizer'
      cfg:
        $import: nvfp4_static
    - quantizer_name: '*block_sparse_moe*input_quantizer'
      cfg:
        $import: nvfp4
    - quantizer_name: '*.experts.*weight_quantizer'
      cfg:
        $import: nvfp4_static
    - quantizer_name: '*.experts.*input_quantizer'
      cfg:
        $import: nvfp4
    - $import: kv_fp8_cast
    - $import: default_disabled_quantizers
```
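Assuming the `quantizer_name` patterns are matched as shell-style globs against fully qualified quantizer names (an assumption about modelopt's matching semantics, not confirmed by this diff), bash's `[[ == ]]` globbing gives a quick way to sanity-check which modules each recipe touches. The module names below are hypothetical:

```bash
#!/usr/bin/env bash
# Hypothetical quantizer names; [[ == ]] glob matching is used here as a
# stand-in for the recipe engine's wildcard matching.
names=(
    model.layers.0.self_attn.q_proj.weight_quantizer
    model.layers.0.mlp.gate_proj.weight_quantizer
    model.layers.0.mlp.experts.3.up_proj.weight_quantizer
    model.layers.0.block_sparse_moe.experts.1.w1.weight_quantizer
)
for n in "${names[@]}"; do
    # mlp_only: dense MLP, block-sparse MoE, and expert projections all match.
    if [[ $n == *mlp*weight_quantizer || $n == *block_sparse_moe*weight_quantizer || $n == *.experts.*weight_quantizer ]]; then
        echo "mlp_only matches:     $n"
    fi
    # experts_only: only expert projections match; dense MLP stays unquantized.
    if [[ $n == *mlp.experts*weight_quantizer || $n == *block_sparse_moe*weight_quantizer ]]; then
        echo "experts_only matches: $n"
    fi
done
# Note that the attention q_proj quantizer matches neither recipe,
# so attention layers are left in high precision by both.
```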
Regression: deleting the `for qformat in $QFORMAT; do … done` loop also drops the implicit binding of the lowercase loop variable `$qformat`, which is still used below at `if [ "$qformat" == "bf16" ] || [ "$qformat" == "fp16" ]`. With the loop removed, `$qformat` is empty and that bf16/fp16 shortcut (which symlinks the source model into `$SAVE_PATH` and marks `MODEL_CONFIG_EXIST=true`) will never trigger: users running `--quant=bf16` or `--quant=fp16` will now fall through to `python hf_ptq.py --qformat=bf16` instead. Either replace `$qformat` with `$QFORMAT` in that check, or add a dedicated `qformat="$QFORMAT"` assignment here.

Contributor

Do we still need the bf16/fp16 path anyway? Maybe we can deprecate them.

Contributor

Not sure we still have use cases where we quantize fp32 to fp16.

Contributor

Yeah, I think we can delete it. Let me add this to the PR.
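For reference, if the bf16/fp16 shortcut were kept rather than deleted, the minimal fix described at the top of this thread is a one-variable change. This is a sketch only: the body is paraphrased from the comment's description of the shortcut, not copied from huggingface_example.sh:

```bash
# Use the uppercase $QFORMAT, which still exists after the loop removal,
# instead of the now-unset lowercase loop variable $qformat.
if [ "$QFORMAT" == "bf16" ] || [ "$QFORMAT" == "fp16" ]; then
    # Per the comment: link the source model into $SAVE_PATH and
    # mark the config as present so quantization is skipped.
    ln -sf "$MODEL_PATH"/* "$SAVE_PATH/"
    MODEL_CONFIG_EXIST=true
fi
```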