# syntax=docker/dockerfile:1.6
# Limit build parallelism to reduce OOM situations
ARG BUILD_JOBS=16
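# Illustrative override at build time (the image tag is a placeholder):
#   docker build --build-arg BUILD_JOBS=8 --target runner -t spark-vllm .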
# =========================================================
# STAGE 1: Base Build Image
# =========================================================
FROM nvidia/cuda:13.2.0-devel-ubuntu24.04 AS base
# Build parallelism
ARG BUILD_JOBS
ENV MAX_JOBS=${BUILD_JOBS}
ENV CMAKE_BUILD_PARALLEL_LEVEL=${BUILD_JOBS}
ENV NINJAFLAGS="-j${BUILD_JOBS}"
ENV MAKEFLAGS="-j${BUILD_JOBS}"
# Set non-interactive frontend to prevent apt prompts
ENV DEBIAN_FRONTEND=noninteractive
# Allow pip to install globally on Ubuntu 24.04 without a venv
ENV PIP_BREAK_SYSTEM_PACKAGES=1
# Set pip cache directory
ENV PIP_CACHE_DIR=/root/.cache/pip
ENV UV_CACHE_DIR=/root/.cache/uv
ENV UV_SYSTEM_PYTHON=1
ENV UV_BREAK_SYSTEM_PACKAGES=1
ENV UV_LINK_MODE=copy
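# UV_LINK_MODE=copy avoids uv's hardlink warnings when the cache mount and the
# install target live on different filesystems (as with BuildKit cache mounts)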
# Set the base directory environment variable
ENV VLLM_BASE_DIR=/workspace/vllm
# 1. Install Build Dependencies & Ccache
# Added ccache to enable incremental compilation caching
RUN apt update && \
    apt install -y --no-install-recommends \
        curl vim cmake build-essential ninja-build \
        libcudnn9-cuda-13 libcudnn9-dev-cuda-13 \
        python3-dev python3-pip git wget \
        libnccl-dev libnccl2 libibverbs1 libibverbs-dev rdma-core \
        ccache \
    && rm -rf /var/lib/apt/lists/* \
    && pip install uv
# Additional deps
RUN --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    uv pip install torch torchvision torchaudio triton --index-url https://download.pytorch.org/whl/nightly/cu130 && \
    uv pip install nvidia-nvshmem-cu13 "apache-tvm-ffi<0.2" filelock pynvml requests tqdm
# Configure Ccache for CUDA/C++
ENV PATH=/usr/lib/ccache:$PATH
ENV CCACHE_DIR=/root/.ccache
# Limit ccache size to prevent unbounded growth (e.g. 50G)
ENV CCACHE_MAXSIZE=50G
# Enable compression to save space
ENV CCACHE_COMPRESS=1
# Tell CMake to use ccache for compilation
ENV CMAKE_CXX_COMPILER_LAUNCHER=ccache
ENV CMAKE_CUDA_COMPILER_LAUNCHER=ccache
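# To check hit rates across rebuilds, a quick sketch (usable in any later build stage):
#   RUN --mount=type=cache,id=ccache,target=/root/.ccache ccache -s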
# Setup Workspace
WORKDIR $VLLM_BASE_DIR
# 2. Set Environment Variables
ARG TORCH_CUDA_ARCH_LIST="12.1a"
ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}
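# 12.1a targets the GB10 GPU in DGX Spark (compute capability 12.1); the "a"
# suffix opts into architecture-specific instructions that are not forward-compatible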
ENV TRITON_PTXAS_PATH=/usr/local/cuda/bin/ptxas
# =========================================================
# STAGE 2: FlashInfer Builder
# =========================================================
FROM base AS flashinfer-builder
ARG FLASHINFER_CUDA_ARCH_LIST="12.1a"
ENV FLASHINFER_CUDA_ARCH_LIST=${FLASHINFER_CUDA_ARCH_LIST}
WORKDIR $VLLM_BASE_DIR
ARG FLASHINFER_REF=main
# --- CACHE BUSTER ---
# Change this argument to force a re-download of FlashInfer
ARG CACHEBUST_FLASHINFER=1
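# Illustrative cache bust (any new value works; a timestamp is convenient):
#   docker build --build-arg CACHEBUST_FLASHINFER=$(date +%s) ...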
# Smart Git Clone (Fetch changes instead of full re-clone)
RUN --mount=type=cache,id=repo-cache,target=/repo-cache \
    cd /repo-cache && \
    if [ ! -d "flashinfer" ]; then \
        echo "Cache miss: Cloning FlashInfer from scratch..." && \
        git clone --recursive https://github.com/flashinfer-ai/flashinfer.git; \
        if [ "$FLASHINFER_REF" != "main" ]; then \
            cd flashinfer && \
            git checkout ${FLASHINFER_REF} && \
            git submodule update --init --recursive; \
        fi; \
    else \
        echo "Cache hit: Fetching flashinfer updates..." && \
        cd flashinfer && \
        git fetch origin && \
        git fetch origin --tags --force && \
        (git checkout --detach origin/${FLASHINFER_REF} 2>/dev/null || git checkout ${FLASHINFER_REF}) && \
        git submodule update --init --recursive && \
        git clean -fdx && \
        git gc --auto; \
    fi && \
    cp -a /repo-cache/flashinfer /workspace/flashinfer
WORKDIR /workspace/flashinfer
# Apply patch to avoid re-downloading existing cubins
COPY flashinfer_cache.patch .
RUN --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    --mount=type=cache,id=ccache,target=/root/.ccache \
    --mount=type=cache,id=cubins-cache,target=/workspace/flashinfer/flashinfer-cubin/flashinfer_cubin/cubins \
    patch -p1 < flashinfer_cache.patch && \
    # flashinfer-python
    sed -i -e 's/license = "Apache-2.0"/license = { text = "Apache-2.0" }/' -e '/license-files/d' pyproject.toml && \
    uv build --no-build-isolation --wheel . --out-dir=/workspace/wheels -v && \
    # flashinfer-cubin
    cd flashinfer-cubin && uv build --no-build-isolation --wheel . --out-dir=/workspace/wheels -v && \
    # flashinfer-jit-cache
    cd ../flashinfer-jit-cache && \
    uv build --no-build-isolation --wheel . --out-dir=/workspace/wheels -v && \
    # dump the git ref into the wheels dir
    cd .. && git rev-parse HEAD > /workspace/wheels/.flashinfer-commit
# =========================================================
# STAGE 3: FlashInfer Wheel Export
# =========================================================
FROM scratch AS flashinfer-export
COPY --from=flashinfer-builder /workspace/wheels /
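# Illustrative wheel export to the host (the destination dir is an assumption);
# the vllm-export stage below works the same way:
#   docker build --target flashinfer-export --output type=local,dest=./wheels .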
# =========================================================
# STAGE 4: vLLM Builder
# =========================================================
FROM base AS vllm-builder
ARG TORCH_CUDA_ARCH_LIST="12.1a"
ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}
WORKDIR $VLLM_BASE_DIR
# --- VLLM SOURCE CACHE BUSTER ---
ARG CACHEBUST_VLLM=1
# Git reference (branch, tag, or SHA) to checkout
ARG VLLM_REF=main
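# Illustrative pin to a tag or SHA (the tag shown is only an example):
#   docker build --build-arg VLLM_REF=v0.11.0 --build-arg CACHEBUST_VLLM=$(date +%s) ...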
# Smart Git Clone (Fetch changes instead of full re-clone)
RUN --mount=type=cache,id=repo-cache,target=/repo-cache \
    cd /repo-cache && \
    if [ ! -d "vllm" ]; then \
        echo "Cache miss: Cloning vLLM from scratch..." && \
        git clone --recursive https://github.com/vllm-project/vllm.git; \
        if [ "$VLLM_REF" != "main" ]; then \
            cd vllm && \
            git checkout ${VLLM_REF} && \
            git submodule update --init --recursive; \
        fi; \
    else \
        echo "Cache hit: Fetching updates..." && \
        cd vllm && \
        git fetch origin && \
        git fetch origin --tags --force && \
        (git checkout --detach origin/${VLLM_REF} 2>/dev/null || git checkout ${VLLM_REF}) && \
        git submodule update --init --recursive && \
        git clean -fdx && \
        git gc --auto; \
    fi && \
    cp -a /repo-cache/vllm $VLLM_BASE_DIR/
WORKDIR $VLLM_BASE_DIR/vllm
ARG VLLM_PRS=""
RUN if [ -n "$VLLM_PRS" ]; then \
        echo "Applying PRs: $VLLM_PRS"; \
        for pr in $VLLM_PRS; do \
            echo "Fetching and applying PR #$pr..."; \
            curl -fL "https://github.com/vllm-project/vllm/pull/${pr}.diff" | git apply -v || exit 1; \
        done; \
    fi
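# Illustrative usage (the PR numbers are placeholders):
#   docker build --build-arg VLLM_PRS="12345 23456" ...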
# Prepare build requirements
RUN --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    python3 use_existing_torch.py && \
    sed -i "/flashinfer/d" requirements/cuda.txt && \
    sed -i '/^triton\b/d' requirements/test.txt && \
    sed -i '/^fastsafetensors\b/d' requirements/test.txt && \
    uv pip install -r requirements/build.txt
# Apply Patches
# TEMPORARY PATCH for fastsafetensors loading in cluster setup - tracking https://github.com/vllm-project/vllm/issues/34180
# COPY fastsafetensors.patch .
# RUN if patch -p1 --dry-run --reverse < fastsafetensors.patch &>/dev/null; then \
#         echo "PR #34180 is already applied"; \
#     else \
#         patch -p1 < fastsafetensors.patch; \
#     fi
# TEMPORARY PATCH for broken vLLM build (unguarded Hopper code) - reverting PR #34758 and #34302
RUN curl -fL https://patch-diff.githubusercontent.com/raw/vllm-project/vllm/pull/34758.diff | patch -p1 -R || echo "Cannot revert PR #34758, skipping"
RUN curl -fL https://patch-diff.githubusercontent.com/raw/vllm-project/vllm/pull/34302.diff | patch -p1 -R || echo "Cannot revert PR #34302, skipping"
# Final Compilation
RUN --mount=type=cache,id=ccache,target=/root/.ccache \
    --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    uv build --no-build-isolation --wheel . --out-dir=/workspace/wheels -v && \
    # dump the git ref into the wheels dir
    git rev-parse HEAD > /workspace/wheels/.vllm-commit
# =========================================================
# STAGE 5: vLLM Wheel Export
# =========================================================
FROM scratch AS vllm-export
COPY --from=vllm-builder /workspace/wheels /
# =========================================================
# STAGE 6: Runner (Installs wheels from host ./wheels/)
# =========================================================
FROM nvidia/cuda:13.2.0-devel-ubuntu24.04 AS runner
# Carry the build settings over from the build image because vLLM JIT-compiles kernels (via ptxas) during startup
# Build parallelism
ARG BUILD_JOBS
ENV MAX_JOBS=${BUILD_JOBS}
ENV CMAKE_BUILD_PARALLEL_LEVEL=${BUILD_JOBS}
ENV NINJAFLAGS="-j${BUILD_JOBS}"
ENV MAKEFLAGS="-j${BUILD_JOBS}"
ENV DEBIAN_FRONTEND=noninteractive
ENV PIP_BREAK_SYSTEM_PACKAGES=1
ENV VLLM_BASE_DIR=/workspace/vllm
# Set pip cache directory
ENV PIP_CACHE_DIR=/root/.cache/pip
ENV UV_CACHE_DIR=/root/.cache/uv
ENV UV_SYSTEM_PYTHON=1
ENV UV_BREAK_SYSTEM_PACKAGES=1
ENV UV_LINK_MODE=copy
# Install runtime dependencies
RUN apt update && \
    apt install -y --no-install-recommends \
        python3 python3-pip python3-dev vim curl git wget \
        libcudnn9-cuda-13 \
        libnccl-dev libnccl2 libibverbs1 libibverbs-dev rdma-core \
        libxcb1 \
    && rm -rf /var/lib/apt/lists/* \
    && pip install uv
# Set final working directory
WORKDIR $VLLM_BASE_DIR
# Download Tiktoken files
RUN mkdir -p tiktoken_encodings && \
    wget -O tiktoken_encodings/o200k_base.tiktoken "https://openaipublic.blob.core.windows.net/encodings/o200k_base.tiktoken" && \
    wget -O tiktoken_encodings/cl100k_base.tiktoken "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken"
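# TIKTOKEN_ENCODINGS_BASE (set below) points consumers of these encodings at the
# local copies so startup can avoid fetching them from the network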
ARG PRE_TRANSFORMERS=0
# Install deps
RUN --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    uv pip install torch torchvision torchaudio triton --index-url https://download.pytorch.org/whl/nightly/cu130 && \
    uv pip install nvidia-nvshmem-cu13 "apache-tvm-ffi<0.2"
# Install wheels from host ./wheels/ (bind-mounted from the build context, so no layer bloat)
# When built with PRE_TRANSFORMERS=1 (the --tf5 flag): override vLLM's transformers<5 constraint to get transformers>=5
RUN --mount=type=bind,source=wheels,target=/workspace/wheels \
    --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    if [ "$PRE_TRANSFORMERS" = "1" ]; then \
        echo "transformers>=5.0.0" > /tmp/tf-override.txt && \
        uv pip install /workspace/wheels/*.whl --override /tmp/tf-override.txt; \
    else \
        uv pip install /workspace/wheels/*.whl; \
    fi
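# Illustrative build with the transformers>=5 override enabled (tag is a placeholder):
#   docker build --build-arg PRE_TRANSFORMERS=1 --target runner -t spark-vllm .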
# Setup environment for runtime
ARG TORCH_CUDA_ARCH_LIST="12.1a"
ENV TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}
ARG FLASHINFER_CUDA_ARCH_LIST="12.1a"
ENV FLASHINFER_CUDA_ARCH_LIST=${FLASHINFER_CUDA_ARCH_LIST}
ENV TRITON_PTXAS_PATH=/usr/local/cuda/bin/ptxas
ENV TIKTOKEN_ENCODINGS_BASE=$VLLM_BASE_DIR/tiktoken_encodings
ENV PATH=$VLLM_BASE_DIR:$PATH
# Final extra deps
RUN --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
    uv pip install "ray[default]" fastsafetensors
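# Illustrative run of the finished image (image name, model, and flags are placeholders):
#   docker run --gpus all -p 8000:8000 spark-vllm vllm serve Qwen/Qwen2.5-7B-Instruct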
# Cleanup
# Kept here for reference: uninstalling in a later layer won't shrink the image as-is without squashing layers
# RUN uv pip uninstall absl-py apex argon2-cffi \
# argon2-cffi-bindings arrow asttokens astunparse async-lru audioread babel beautifulsoup4 \
# black bleach comm contourpy cycler datasets debugpy decorator defusedxml dllist dm-tree \
# execnet executing expecttest fastjsonschema fonttools fqdn gast hypothesis \
# ipykernel ipython ipython_pygments_lexers isoduration isort jedi joblib jupyter-events \
# jupyter-lsp jupyter_client jupyter_core jupyter_server jupyter_server_terminals jupyterlab \
# jupyterlab_code_formatter jupyterlab_pygments jupyterlab_server \
# jupyterlab_tensorboard_pro jupytext kiwisolver matplotlib matplotlib-inline \
# mistune ml_dtypes mock nbclient nbconvert nbformat nest-asyncio notebook notebook_shim \
# opt_einsum optree outlines_core overrides pandas pandocfilters parso pexpect polygraphy pooch \
# pyarrow pycocotools pytest-flakefinder pytest-rerunfailures pytest-shard pytest-xdist \
# scikit-learn scipy Send2Trash soundfile soupsieve soxr spin stack-data \
# wcwidth webcolors xdoctest Werkzeug