Adjusted model parameters

This commit is contained in:
Eugene Rakhmatulin
2026-03-12 12:59:05 -07:00
parent 122edc8229
commit 6f9a2f981c

View File

# Recipe: Nemotron-3-Super-NVFP4
# Optimized for Marlin backend throughput
recipe_version: "1"
name: Nemotron-3-Super-NVFP4-Marlin-Optimized
description: vLLM serving Nemotron-3-Super-120B using Marlin kernels
model: nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4
# NOTE(review): the diff added a second `container: vllm-node` after `mods`;
# duplicate mapping keys are invalid YAML (most parsers silently keep the
# last one), so only this single occurrence is kept.
container: vllm-node
cluster_only: false
# This model can only run on single node (solo)
solo_only: true
mods:
  - mods/nemotron-super
defaults:
  port: 8000
  # Quoted so no YAML consumer can mis-type the bind address.
  host: "0.0.0.0"
  tensor_parallel: 1
  gpu_memory_utilization: 0.7
  max_model_len: 262144
  max_num_seqs: 10
env:
  # Marlin performance overrides
  VLLM_NVFP4_GEMM_BACKEND: "marlin"
  VLLM_TEST_FORCE_FP8_MARLIN: "1"
  VLLM_MARLIN_USE_ATOMIC_ADD: "1"
  # Disable conflicting backends
  VLLM_FP8_BACKEND: "marlin"
  VLLM_SCALED_MM_BACKEND: "marlin"
# Placeholders in {braces} are substituted from `defaults` by the recipe
# runner before the command is executed.
command: |
  vllm serve nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4 \
    --kv-cache-dtype fp8 \
    -tp {tensor_parallel} \
    --trust-remote-code \
    --gpu-memory-utilization {gpu_memory_utilization} \
    --max-model-len {max_model_len} \
    --max-num-seqs {max_num_seqs} \
    --enable-prefix-caching \
    --host {host} \
    --port {port} \
    --enable-auto-tool-choice \
    --load-format fastsafetensors \
    --tool-call-parser qwen3_coder \
    --reasoning-parser-plugin super_v3_reasoning_parser.py \
    --reasoning-parser super_v3