More robust handling of PRs

This commit is contained in:
Eugene Rakhmatulin
2026-04-22 13:18:12 -07:00
parent c187912e23
commit 7dea11bbf0

View File

@@ -122,10 +122,15 @@ WORKDIR /workspace/flashinfer
 ARG FLASHINFER_PRS=""
 RUN if [ -n "$FLASHINFER_PRS" ]; then \
+    # Git requires a user identity to create merge commits
+    git config --global user.email "builder@example.com"; \
+    git config --global user.name "Docker Builder"; \
+    \
     echo "Applying PRs: $FLASHINFER_PRS"; \
     for pr in $FLASHINFER_PRS; do \
-        echo "Fetching and applying PR #$pr..."; \
-        curl -fL "https://github.com/flashinfer-ai/flashinfer/pull/${pr}.diff" | git apply -v; \
+        echo "Fetching and merging PR #$pr..."; \
+        git fetch origin pull/${pr}/head:pr-${pr}; \
+        git merge pr-${pr} --no-edit; \
     done; \
 fi
@@ -204,10 +209,15 @@ WORKDIR $VLLM_BASE_DIR/vllm
 ARG VLLM_PRS=""
 RUN if [ -n "$VLLM_PRS" ]; then \
+    # Git requires a user identity to create merge commits
+    git config --global user.email "builder@example.com"; \
+    git config --global user.name "Docker Builder"; \
+    \
     echo "Applying PRs: $VLLM_PRS"; \
     for pr in $VLLM_PRS; do \
-        echo "Fetching and applying PR #$pr..."; \
-        curl -fL "https://github.com/vllm-project/vllm/pull/${pr}.diff" | git apply -v; \
+        echo "Fetching and merging PR #$pr..."; \
+        git fetch origin pull/${pr}/head:pr-${pr}; \
+        git merge pr-${pr} --no-edit; \
     done; \
 fi
@@ -306,7 +316,7 @@ ARG PRE_TRANSFORMERS=0
 # Install deps
 RUN --mount=type=cache,id=uv-cache,target=/root/.cache/uv \
     uv pip install torch==2.11.0 torchvision torchaudio triton --index-url https://download.pytorch.org/whl/cu130 && \
-    uv pip install nvidia-nvshmem-cu13 "apache-tvm-ffi<0.2"
+    uv pip install nvidia-nvshmem-cu13 "apache-tvm-ffi<0.2" nvidia-cutlass-dsl-libs-cu13
 # Install wheels from host ./wheels/ (bind-mounted from build context — no layer bloat)
 # With --tf5: override vLLM's transformers<5 constraint to get transformers>=5