From 8fec9bed066cd89999a8b7f08931ccfe7ce50896 Mon Sep 17 00:00:00 2001
From: Eugene Rakhmatulin
Date: Thu, 12 Mar 2026 13:30:15 -0700
Subject: [PATCH] Updated Nemotron to support dual sparks

---
 README.md                           | 10 ++++++++++
 recipes/nemotron-3-super-nvfp4.yaml |  9 +++++----
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index c3f1e52..da32fdf 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,16 @@ Don't do it every time you rebuild, because it will slow down compilation times.
 
 For periodic maintenance, I recommend using a filter: `docker builder prune --filter until=72h`
 
+### 2026-03-12
+
+#### Nemotron-3-Super-120B NVFP4 Recipe
+
+Added a new recipe `nemotron-3-super-nvfp4` for running `nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4` with Marlin kernels. Supports both solo and cluster modes. Includes a custom reasoning parser (`super_v3_reasoning_parser.py`) fetched from the model repository. Supports both dual and single Spark configurations.
+
+```bash
+./run-recipe.sh nemotron-3-super-nvfp4
+```
+
 ### 2026-03-11
 
 #### Qwen3-Coder-Next INT4-AutoRound Recipe
diff --git a/recipes/nemotron-3-super-nvfp4.yaml b/recipes/nemotron-3-super-nvfp4.yaml
index 48120fe..a4de32d 100644
--- a/recipes/nemotron-3-super-nvfp4.yaml
+++ b/recipes/nemotron-3-super-nvfp4.yaml
@@ -7,8 +7,7 @@ description: vLLM serving Nemotron-3-Super-120B using Marlin kernels
 model: nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4
 container: vllm-node
 cluster_only: false
-# This model can only run on single node (solo)
-solo_only: true
+solo_only: false
 
 mods:
   - mods/nemotron-super
@@ -17,7 +16,7 @@ container: vllm-node
 defaults:
   port: 8000
   host: 0.0.0.0
-  tensor_parallel: 1
+  tensor_parallel: 2
   gpu_memory_utilization: 0.7
   max_model_len: 262144
   max_num_seqs: 10
@@ -41,4 +40,6 @@ command: |
     --load-format fastsafetensors \
     --tool-call-parser qwen3_coder \
     --reasoning-parser-plugin super_v3_reasoning_parser.py \
-    --reasoning-parser super_v3
\ No newline at end of file
+    --reasoning-parser super_v3 \
+    --tensor-parallel-size {tensor_parallel} \
+    --distributed-executor-backend ray
\ No newline at end of file