Switch from nvidia/cuda base + manual PyTorch install to pytorch/pytorch:2.6.0-cuda12.4-cudnn9-runtime base image. This avoids the ~15GB build that exceeds Docker disk limits. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
29 lines
846 B
Docker
Executable File
# syntax=docker/dockerfile:1

# Use PyTorch base image with CUDA support (much smaller than building from scratch).
# Pinned to an exact tag (2.6.0 / CUDA 12.4 / cuDNN 9, runtime variant) for reproducibility.
FROM pytorch/pytorch:2.6.0-cuda12.4-cudnn9-runtime

# Set environment variables.
# PYTHONUNBUFFERED: write stdout/stderr unbuffered so container logs stream live.
# PYTHONFAULTHANDLER: dump Python tracebacks on hard crashes (e.g. segfaults).
# NOTE: DEBIAN_FRONTEND=noninteractive was removed — it is a build-time apt
# setting, no apt-get step exists in this Dockerfile, and baking it into ENV
# would leak it into the runtime environment.
ENV PYTHONUNBUFFERED=TRUE \
    PYTHONFAULTHANDLER=1

# Set working directory (created automatically if it does not exist)
WORKDIR /workspace

# Install chai_lab and transformers in a single layer.
# chai_lab is pinned exactly; transformers uses a floor constraint
# (presumably the minimum chai_lab's ESM integration needs — TODO confirm
# and pin exactly for full reproducibility).
RUN pip install --no-cache-dir \
    chai_lab==0.5.2 \
    "transformers>=4.30.0"

# Verify installations — fail the build early if any import or the CLI is broken
RUN python -c "import torch; print(f'PyTorch: {torch.__version__}')" && \
    python -c "from transformers import EsmModel; print('transformers: OK')" && \
    python -c "import chai_lab; print('chai_lab: OK')" && \
    chai --help

# Add entry point script.
# COPY --chmod sets the execute bit in the same layer, avoiding the extra
# layer (and duplicated file bytes) of a follow-up `RUN chmod +x`.
COPY --chmod=755 entrypoint.sh /workspace/

# NOTE(review): the image runs as root; consider adding a non-root USER once
# entrypoint.sh's filesystem/GPU permission requirements are confirmed.

# Set entry point (exec form: the script runs as PID 1 and receives SIGTERM
# from `docker stop`; entrypoint.sh should end with `exec "$@"` — verify).
ENTRYPOINT ["/workspace/entrypoint.sh"]