# syntax=docker/dockerfile:1
# Server image: Linux + TensorFlow with CUDA (GPU). Host needs NVIDIA driver + nvidia-container-toolkit.
# Build from repository root (compose sets context to repo root):
#   docker build -f src/flash_detector_web/deploy/Dockerfile -t flicker-detector-gpu .

# --- Stage 1: build the web UI bundle. Node toolchain and node_modules never
# --- reach the final image; only /web/dist is copied forward.
FROM node:20-bookworm-slim AS web
WORKDIR /web
# Lockfiles are copied alone first so the `npm ci` layer stays cached until
# dependencies actually change (source edits won't trigger a reinstall).
COPY src/flicker_detector_webui/package.json src/flicker_detector_webui/package-lock.json ./
RUN npm ci
COPY src/flicker_detector_webui/ ./
RUN npm run build

# --- Stage 2: runtime image (Python + TensorFlow with CUDA wheels) ---
FROM python:3.11-slim-bookworm

WORKDIR /app

# Runtime shared libraries only — no compilers or headers in the final image.
# NOTE(review): presumably required by binary wheels pulled in by the app
# (libglib2.0-0 by imaging/video libs, libgomp1 as the OpenMP runtime) — confirm.
# apt lists are removed in the same layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    libglib2.0-0 \
    libgomp1 \
    && rm -rf /var/lib/apt/lists/*

# GPU build: CUDA/cuDNN via pip (Linux). Install before `pip install .` so the resolver keeps this variant.
RUN pip install --no-cache-dir "tensorflow[and-cuda]>=2.14,<3"

# Application code changes most often — keep these layers last for cache reuse.
# The prebuilt web bundle is taken from the `web` stage (no Node here).
COPY pyproject.toml /app/pyproject.toml
COPY src /app/src
COPY --from=web /web/dist /app/src/flicker_detector_webui/dist
RUN pip install --no-cache-dir .

# Runtime configuration (overridable at `docker run -e ...`).
# NOTE(review): CMD below also passes --host/--port explicitly; the BIND/PORT
# env values look redundant — confirm which one the server actually honors.
ENV FLICKER_DETECTOR_BIND=0.0.0.0 \
    FLICKER_DETECTOR_PORT=8765 \
    FLICKER_DETECTOR_WEB_DATA=/data \
    HF_HOME=/data/huggingface

# Run as a dedicated non-root user with a stable numeric UID (so e.g.
# Kubernetes `runAsNonRoot` can verify it). /data is created and chown'd in
# the same layer so the server can write model caches when no volume is
# mounted; a bind-mounted /data must be writable by UID 10001 on the host.
RUN groupadd --system --gid 10001 app \
    && useradd --system --uid 10001 --gid app --home /app --shell /usr/sbin/nologin app \
    && mkdir -p /data/huggingface \
    && chown -R app:app /data
USER app

# Documentation only — publish with `-p 8765:8765` (port is >1024, so binding
# works without extra capabilities under the non-root user).
EXPOSE 8765

# Cheap liveness probe via the app's own health endpoint; urlopen raises on
# failure, so the python process exits non-zero and the check fails.
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
  CMD python -c "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8765/api/health', timeout=5)"

# Exec form: the server is PID 1 and receives SIGTERM from `docker stop`.
CMD ["gpu-flicker-detector-server", "--host", "0.0.0.0", "--port", "8765"]
