# home-llm/Dockerfile
FROM nvcr.io/nvidia/cuda:12.9.1-cudnn-runtime-ubuntu22.04
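# CUDA 12.9 runtime with cuDNN on Ubuntu 22.04; using the GPU at run time requires
# the host's NVIDIA driver plus the NVIDIA Container Toolkit.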
# Install Python 3.11 and system dependencies
RUN apt-get update && apt-get install -y \
        python3.11 python3-pip git curl libgl1 libglib2.0-0 ffmpeg && \
    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
    apt-get install -y nodejs && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
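# Make the bare `python` and `pip` commands resolve to Python 3.11 and pip3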
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 && \
    update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
RUN python -m pip install --upgrade pip
# Set working directory
WORKDIR /app
# Clone Open WebUI
RUN git clone https://github.com/open-webui/open-webui.git .
# Patch env.py: logging.getLevelNamesMapping() only exists on Python 3.11+, so fall
# back to the private _nameToLevel mapping for compatibility with older interpreters
# (e.g., Ubuntu 22.04's system Python 3.10)
RUN sed -i 's/logging.getLevelNamesMapping()/logging._nameToLevel/' /app/backend/open_webui/env.py
# Set Node.js memory limit for build
ENV NODE_OPTIONS="--max_old_space_size=8192"
# Build the frontend (Open WebUI keeps its SvelteKit sources at the repository
# root; there is no frontend/ subdirectory)
WORKDIR /app
RUN npm install y-protocols --legacy-peer-deps
RUN npm install --legacy-peer-deps
# Build the frontend with verbose logging; `|| true` lets the image build continue even if npm exits non-zero
RUN npm run build --verbose || true
# Change ownership of the backend directory
RUN chown -R 1001:1001 /app/backend
# Install backend dependencies
WORKDIR /app/backend
RUN python -m pip install --no-cache-dir -r requirements.txt uvicorn
# Install PyTorch with CUDA 12.x support
RUN pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
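# The cu121 wheels bundle their own CUDA libraries, so they only need a recent
# NVIDIA driver on the host rather than the exact toolkit version in the base image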
# Set the DATABASE_URL environment variable (uncomment if needed)
# ENV DATABASE_URL='sqlite:////home/llm/open-webui/database.db'
# RUN python -m peewee_migrate migrate
EXPOSE 3000
CMD ["uvicorn", "open_webui.main:app", "--host", "0.0.0.0", "--port", "3000"]
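# Example usage (a sketch; the image tag "home-llm-openwebui" and the host port
# mapping are arbitrary choices, not defined anywhere in this file):
#   docker build -t home-llm-openwebui .
#   docker run --gpus all -p 3000:3000 home-llm-openwebui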