# Files
# spark-vllm-docker/recipes/nemotron-3-super-nvfp4.yaml
# 2026-03-12 13:30:15 -07:00

# 45 lines
# 1.2 KiB
# YAML

# Recipe: Nemotron-3-Super-NVFP4
# Optimized for Marlin backend throughput
---
recipe_version: "1"
name: Nemotron-3-Super-NVFP4-Marlin-Optimized
description: vLLM serving Nemotron-3-Super-120B using Marlin kernels
model: nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4
# Declared once — the original repeated this key, which is invalid YAML
# (most parsers silently keep only the last occurrence).
container: vllm-node
cluster_only: false
solo_only: false
mods:
  - mods/nemotron-super
# Values substituted into the {placeholders} of `command` below.
defaults:
  port: 8000
  host: 0.0.0.0
  tensor_parallel: 2
  gpu_memory_utilization: 0.7
  max_model_len: 262144
  max_num_seqs: 10
env:
  # Quoted strings so the consumer receives them verbatim as env vars
  # (unquoted 1 would parse as an integer).
  VLLM_NVFP4_GEMM_BACKEND: "marlin"
  VLLM_TEST_FORCE_FP8_MARLIN: "1"
  VLLM_MARLIN_USE_ATOMIC_ADD: "1"
# |- keeps internal newlines (needed for the backslash continuations)
# and strips the trailing newline. Tensor parallelism is passed exactly
# once via --tensor-parallel-size; the original also passed -tp, which
# is the same vLLM flag supplied twice.
command: |-
  vllm serve nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4 \
    --kv-cache-dtype fp8 \
    --trust-remote-code \
    --gpu-memory-utilization {gpu_memory_utilization} \
    --max-model-len {max_model_len} \
    --max-num-seqs {max_num_seqs} \
    --enable-prefix-caching \
    --host {host} \
    --port {port} \
    --enable-auto-tool-choice \
    --load-format fastsafetensors \
    --tool-call-parser qwen3_coder \
    --reasoning-parser-plugin super_v3_reasoning_parser.py \
    --reasoning-parser super_v3 \
    --tensor-parallel-size {tensor_parallel} \
    --distributed-executor-backend ray