# Recipe: Qwen3.5-397B-A17B-FP8
# Qwen3.5-397B-A17B model in FP8 precision
# Multi-modal input

recipe_version: "1"
name: Qwen3.5-397B-A17B-FP8
description: vLLM serving Qwen3.5-397B-A17B-FP8

# HuggingFace model to download (optional, for --download-model)
model: Qwen/Qwen3.5-397B-A17B-FP8
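
# The weights can also be pre-fetched manually before serving, e.g. with the
# standard Hugging Face CLI (an option outside this recipe, not required by it):
#   huggingface-cli download Qwen/Qwen3.5-397B-A17B-FP8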

#solo_only: true

# Container image to use
container: vllm-node-tf5

build_args:
  - --tf5

# Mod required to fix RoPE syntax error
mods:
  - mods/fix-qwen3.5-autoround

# Default settings (can be overridden via CLI)
defaults:
  port: 8000
  host: 0.0.0.0
  tensor_parallel: 4
  gpu_memory_utilization: 0.85
  max_model_len: 262144
  max_num_batched_tokens: 8192
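
# With these defaults, the {placeholder} fields in the command template below
# expand to, e.g., --max-model-len 262144 (a 256K-token context window,
# 256 * 1024), --port 8000, and -tp 4; CLI overrides replace them per run.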

# Environment variables
env:
  VLLM_USE_DEEP_GEMM: 0            # disable DeepGEMM kernels
  VLLM_USE_FLASHINFER_MOE_FP16: 1  # use FlashInfer FP16 MoE kernels
  VLLM_USE_FLASHINFER_SAMPLER: 0   # disable the FlashInfer sampler
  OMP_NUM_THREADS: 4               # cap OpenMP threads per process
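
# Optional pre-flight check (assumes it is run inside the serving container):
#   python -c "import flashinfer; print(flashinfer.__version__)"
# confirms the FlashInfer wheel is present before the flashinfer attention
# backend below is selected.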

# The vLLM serve command template
command: |
  vllm serve Qwen/Qwen3.5-397B-A17B-FP8 \
    --max-model-len {max_model_len} \
    --gpu-memory-utilization {gpu_memory_utilization} \
    --port {port} \
    --host {host} \
    --load-format fastsafetensors \
    --enable-prefix-caching \
    --enable-auto-tool-choice \
    --tool-call-parser qwen3_coder \
    --reasoning-parser qwen3 \
    --max-num-batched-tokens {max_num_batched_tokens} \
    --trust-remote-code \
    -tp {tensor_parallel} \
    --distributed-executor-backend ray \
    --mm-encoder-tp-mode data \
    --kv-cache-dtype fp8 \
    --compilation-config.cudagraph_mode none \
    --max-num-seqs 32 \
    --attention-backend flashinfer
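
# Smoke test once the server is up (vLLM exposes an OpenAI-compatible API on
# {host}:{port}; the model name matches the served model id above):
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "Qwen/Qwen3.5-397B-A17B-FP8", "messages": [{"role": "user", "content": "Hello"}]}'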