# Recipe: Qwen3.5-397B-A17B-INT4-AutoRound
# Qwen3.5-397B model in Intel INT4 AutoRound quantization
# Important: set memory utilization in GB, not as a percentage! Requires --no-ray to fit the full context on two Sparks.
# If you experience a node shutdown, limit the GPU clocks on the affected node (or both): `sudo nvidia-smi -lgc 200,2150`
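# (To restore the default clocks afterwards: `sudo nvidia-smi -rgc`.)
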
recipe_version: "1"
name: Qwen3.5-397B-INT4-Autoround
description: EXPERIMENTAL recipe for Qwen3.5-397B-INT4-Autoround (please refer to the README for details! Use with the `--no-ray` parameter!)

# HuggingFace model to download (optional, for --download-model)
model: Intel/Qwen3.5-397B-A17B-int4-AutoRound
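# To pre-fetch the weights yourself instead of using --download-model, the
# standard Hugging Face CLI should also work (an assumption; it is not part of
# this recipe):
#   huggingface-cli download Intel/Qwen3.5-397B-A17B-int4-AutoRound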

# This recipe runs in cluster mode only (two Sparks)
cluster_only: true

# Container image to use
container: vllm-node-tf5

build_args:
  - --tf5

mods:
  # Mod to fix the ROPE syntax error (currently disabled):
  # - mods/fix-qwen3.5-autoround
  - mods/fix-qwen3.5-chat-template
  - mods/gpu-mem-util-gb
  - mods/drop-caches

# Default settings (can be overridden via CLI)
defaults:
  port: 8000
  host: 0.0.0.0
  tensor_parallel: 2
  gpu_memory_utilization: 112   # GB per node (via mods/gpu-mem-util-gb), not a percentage
  max_model_len: 262144
  max_num_batched_tokens: 4176
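
# Rough memory sanity check (back-of-the-envelope, assuming ~4 bits per weight
# for INT4 plus quantization overhead): 397B params * ~0.5 bytes = ~200 GB of
# weights, split across 2 nodes = ~100 GB/node, leaving roughly 12 GB of the
# 112 GB per-node budget for the fp8 KV cache and activations.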

# Environment variables
env:
  PYTORCH_CUDA_ALLOC_CONF: "expandable_segments:True"   # reduces CUDA allocator fragmentation
  VLLM_MARLIN_USE_ATOMIC_ADD: 1

# The vLLM serve command template
command: |
  vllm serve Intel/Qwen3.5-397B-A17B-int4-AutoRound \
    --max-model-len {max_model_len} \
    --max-num-seqs 2 \
    --kv-cache-dtype fp8 \
    --gpu-memory-utilization-gb {gpu_memory_utilization} \
    --port {port} \
    --host {host} \
    --enable-prefix-caching \
    --enable-auto-tool-choice \
    --tool-call-parser qwen3_xml \
    --reasoning-parser qwen3 \
    --max-num-batched-tokens {max_num_batched_tokens} \
    --trust-remote-code \
    --chat-template unsloth.jinja \
    --load-format instanttensor \
    -tp {tensor_parallel} \
    --distributed-executor-backend ray
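
# Once the server is up, a quick smoke test against vLLM's standard
# OpenAI-compatible API (adjust host/port if you changed the defaults):
#   curl http://localhost:8000/v1/models
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "Intel/Qwen3.5-397B-A17B-int4-AutoRound",
#          "messages": [{"role": "user", "content": "Hello!"}]}'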