feat: Add recipe-based one-click model deployment system
Introduces a YAML recipe system for simplified model deployment:

- run-recipe.py: Main script handling build, download, and launch
- run-recipe.sh: Bash wrapper for dependency management
- recipes/: Pre-configured recipes for common models
  - glm-4.7-flash-awq.yaml: GLM-4.7-Flash with AWQ quantization
  - glm-4.7-nvfp4.yaml: GLM-4.7 with NVFP4 (cluster-only)
  - minimax-m2-awq.yaml: MiniMax M2 with AWQ
  - openai-gpt-oss-120b.yaml: OpenAI GPT-OSS 120B with MXFP4

Key features:
- Auto-discover cluster nodes with --discover, saves to .env
- Load nodes from .env automatically on subsequent runs
- cluster_only flag for models requiring multi-node setup
- build_args field for Dockerfile selection (--pre-tf, --exp-mxfp4)
- Solo mode auto-strips --distributed-executor-backend ray
- --setup flag for full build + download + run workflow
- --dry-run to preview execution without running

A hypothetical recipe fragment using the cluster_only and build_args fields is sketched after the usage examples.

Usage:
  ./run-recipe.sh --discover                          # Find and save cluster nodes
  ./run-recipe.sh glm-4.7-flash-awq --solo --setup
  ./run-recipe.sh glm-4.7-nvfp4 --setup               # Uses nodes from .env
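A minimal sketch of what a cluster-only recipe could look like, assuming the same schema as the recipe in this diff. Only the cluster_only and build_args field names come from the feature list above; every value shown (model id, build flag, tensor_parallel, and the list form of build_args) is an illustrative placeholder, not the contents of the shipped glm-4.7-nvfp4.yaml:

  # Hypothetical sketch only -- values are placeholders, not a shipped recipe
  recipe_version: "1"
  name: Example-Cluster-Only-Recipe
  description: Illustrative recipe that is refused outside a multi-node setup
  model: example-org/example-nvfp4-model       # placeholder model id
  container: vllm-node
  cluster_only: true                           # field named in the commit message for multi-node-only models
  build_args: ["--pre-tf"]                     # Dockerfile selection flag from the commit message; list form is an assumption
  mods: []
  defaults:
    tensor_parallel: 8                         # placeholder value
  command: |
    vllm serve example-org/example-nvfp4-model \
      -tp {tensor_parallel} \
      --distributed-executor-backend ray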
recipes/minimax-m2-awq.yaml (new file, +40 lines)
@@ -0,0 +1,40 @@
# Recipe: MiniMax-M2-AWQ
# MiniMax M2 model with AWQ quantization

recipe_version: "1"
name: MiniMax-M2-AWQ
description: vLLM serving MiniMax-M2-AWQ with Ray distributed backend

# HuggingFace model to download (optional, for --download-model)
model: QuantTrio/MiniMax-M2-AWQ

# Container image to use
container: vllm-node

# No mods required
mods: []

# Default settings (can be overridden via CLI)
defaults:
  port: 8000
  host: 0.0.0.0
  tensor_parallel: 2
  gpu_memory_utilization: 0.7
  max_model_len: 128000

# Environment variables
env: {}

# The vLLM serve command template
command: |
  vllm serve QuantTrio/MiniMax-M2-AWQ \
    --port {port} \
    --host {host} \
    --gpu-memory-utilization {gpu_memory_utilization} \
    -tp {tensor_parallel} \
    --distributed-executor-backend ray \
    --max-model-len {max_model_len} \
    --load-format fastsafetensors \
    --enable-auto-tool-choice \
    --tool-call-parser minimax_m2 \
    --reasoning-parser minimax_m2_append_think
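An explanatory note on the command template above, not part of the file: with this recipe's defaults, the {placeholder} fields would be expected to resolve as sketched below, and per the commit message, running with --solo additionally strips --distributed-executor-backend ray from the rendered command.

  # Explanatory sketch, not part of minimax-m2-awq.yaml
  #   {port}                    -> 8000
  #   {host}                    -> 0.0.0.0
  #   {tensor_parallel}         -> 2
  #   {gpu_memory_utilization}  -> 0.7
  #   {max_model_len}           -> 128000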