Fixed Qwen3.6 recipes

This commit is contained in:
Eugene Rakhmatulin
2026-05-06 10:56:09 -07:00
parent c67c5b5c1e
commit b87854fd4c
2 changed files with 5 additions and 6 deletions

View File

@@ -1,5 +1,5 @@
# Recipe: Qwen/Qwen3.5-35B-A3B-FP8 # Recipe: Qwen/Qwen3.6-35B-A3B-FP8
# Qwen/Qwen3.5-35B-A3B model in native FP8 format # Qwen/Qwen3.6-35B-A3B model in native FP8 format
recipe_version: "1" recipe_version: "1"
@@ -33,7 +33,7 @@ env:
# The vLLM serve command template # The vLLM serve command template
command: | command: |
vllm serve Qwen/Qwen3.5-35B-A3B-FP8 \ vllm serve Qwen/Qwen3.6-35B-A3B-FP8 \
--host {host} \ --host {host} \
--port {port} \ --port {port} \
--max-model-len {max_model_len} \ --max-model-len {max_model_len} \
@@ -46,6 +46,6 @@ command: |
--attention-backend flash_attn \ --attention-backend flash_attn \
--enable-prefix-caching \ --enable-prefix-caching \
--chat-template fixed_chat_template.jinja \ --chat-template fixed_chat_template.jinja \
--speculative-config '{{"method": "dflash", "model": "z-lab/Qwen3.5-35B-A3B-DFlash", "num_speculative_tokens": 15}}' \ --speculative-config '{{"method": "dflash", "model": "z-lab/Qwen3.6-35B-A3B-DFlash", "num_speculative_tokens": 15}}' \
-tp {tensor_parallel} \ -tp {tensor_parallel} \
--distributed-executor-backend ray --distributed-executor-backend ray

View File

@@ -14,7 +14,6 @@ model: Qwen/Qwen3.6-35B-A3B-FP8
# Container image to use # Container image to use
container: vllm-node container: vllm-node
# Mod required to fix slowness and crash in the cluster (tracking https://github.com/vllm-project/vllm/issues/33857)
mods: mods:
- mods/fix-qwen3.6-chat-template - mods/fix-qwen3.6-chat-template
@@ -33,7 +32,7 @@ env:
# The vLLM serve command template # The vLLM serve command template
command: | command: |
vllm serve Qwen/Qwen3.5-35B-A3B-FP8 \ vllm serve Qwen/Qwen3.6-35B-A3B-FP8 \
--host {host} \ --host {host} \
--port {port} \ --port {port} \
--max-model-len {max_model_len} \ --max-model-len {max_model_len} \