124 changes: 124 additions & 0 deletions modelopt_recipes/models/Nemotron-3-Super-120B-A12B/super-nvfp4.yaml
# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Mirrors the published nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4 hf_quant_config.json:
# - MoE routed experts (mixer.experts.<N>.{up,down}_proj): NVFP4 W4A4 weight MSE, group_size 16
# - MoE shared experts (mixer.shared_experts.{up,down}_proj): FP8 per-tensor
# - Mamba mixer linears (mixer.{in,out}_proj): FP8 per-tensor
# - KV cache: FP8
# - Attention linears ({q,k,v}_proj): BF16 (not quantized)
# - MTP head, lm_head, output, mamba conv1d: BF16 (not quantized)
# - Latent MoE (fc1_latent_proj, fc2_latent_proj): FP8 per-tensor (layers 1, 3, 5 only; see note below)
# - SSM cache: FP32 (can be set to FP16 in vLLM)
#
# Calibration: weight MSE with FP8-scale sweep over the 128 e4m3 scale values
# (NVFP4 weights use static block scales selected by MSE; FP8 per-tensor scales
# are also chosen via MSE search instead of plain amax).
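#
# Illustrative reading of that sweep (an assumption about how the calibration
# searches, not part of the recipe schema): for each 16-value NVFP4 weight
# block, candidate e4m3 scales s are tried and the one minimizing
#   sum_over_block( (w - s * e2m1_round(w / s))^2 )
# is kept, rather than deriving s directly from the block amax; the FP8
# per-tensor scales are likewise picked by minimizing weight MSE.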
metadata:
  recipe_type: ptq
  description: >-
    Super NVFP4 mixed precision: sparse MoE experts NVFP4 (W4A4, group_size 16);
    shared experts, mamba in/out_proj, and attention
    o_proj/fc1_latent_proj/fc2_latent_proj FP8 per-tensor; FP8 KV cache;
    lm_head/MTP/SSM stay BF16/FP16. Weight-MSE calibration with FP8 scale sweep.

quantize:
  algorithm:
    method: mse
    fp8_scale_sweep: true
  quant_cfg:
    # Everything is disabled by default; the entries below re-enable specific quantizers.
    - quantizer_name: '*'
      enable: false

    # MoE routed experts -> NVFP4 W4A4, block_size 16, e4m3 scale.
    # Weight uses static block scales (chosen by MSE); activations stay dynamic.
    - quantizer_name: '*mixer.experts.*weight_quantizer'
      enable: true
      cfg:
        block_sizes:
          -1: 16
          type: static
          scale_bits: e4m3
        num_bits: e2m1
    - quantizer_name: '*mixer.experts.*input_quantizer'
      enable: true
      cfg:
        block_sizes:
          -1: 16
          type: dynamic
          scale_bits: e4m3
        num_bits: e2m1
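    # Worked example (illustrative) for the two entries above: e2m1 represents
    # +/- {0, 0.5, 1, 1.5, 2, 3, 4, 6}, so each run of 16 weights along the last
    # dim shares one e4m3 scale s, a weight w is stored as the nearest e2m1
    # value to w / s, and dequantization returns s * e2m1(w / s). "static" fixes
    # the weight block scales at calibration time; "dynamic" recomputes the
    # activation block scales from the incoming tensor at runtime.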

    # MoE shared experts -> FP8 per-tensor.
    - quantizer_name: '*mixer.shared_experts.*weight_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.shared_experts.*input_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
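    # Note (illustrative): num_bits e4m3 with an empty axis means a single FP8
    # scale for the whole tensor (no per-channel axis). e4m3's largest finite
    # value is 448, so the scale maps the calibrated range into [-448, 448];
    # with the MSE sweep above, the scale is chosen to minimize weight error
    # rather than taken straight from amax.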

    # Mamba mixer linears -> FP8 per-tensor.
    - quantizer_name: '*mixer.in_proj*weight_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.in_proj*input_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.out_proj*weight_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.out_proj*input_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:

    # Latent MoE projections (fc1_latent_proj, fc2_latent_proj) -> FP8 per-tensor.
    # NOTE: only 3 layers have their latent MoE projections quantized to FP8
    # (layers 1, 3, and 5).
    - quantizer_name: '*mixer.fc1_latent_proj*weight_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.fc1_latent_proj*input_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.fc2_latent_proj*weight_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:
    - quantizer_name: '*mixer.fc2_latent_proj*input_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
        axis:

    # KV cache -> FP8.
    - quantizer_name: '*[kv]_bmm_quantizer'
      enable: true
      cfg:
        num_bits: e4m3
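    # The '[kv]' glob is assumed to match both k_bmm_quantizer and
    # v_bmm_quantizer, so cached keys and values are stored in FP8.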

# Stay BF16: lm_head, output projection, MoE routers/gates, MTP head.
# SSM state / mamba conv1d stay FP16.