# Trainything — build & run
#
# Prerequisites: Docker or Podman
#
# Quick start:
#   make build
#   make run                 # auto-detects GPU (NVIDIA/AMD/CPU)
#
# Run a pipeline headlessly (no browser needed):
#   make run-pipeline PIPELINE=examples/mnist_train.json
#
# Development:
#   make dev           # backend + frontend dev servers
#   make test          # fast tests (no data downloads)
#   make test-all      # all tests including slow ones
#   make lint          # ruff linter

IMAGE      ?= trainything
CONTAINER  ?= trainything
PORT       ?= 8000
VOLUME     ?= trainything-data

# Auto-detect container runtime: prefer docker, fall back to podman.
# _RUNTIME_BIN is resolved once at parse time; RUNTIME is deliberately
# recursive (=) so the $(error) fires only when a container target
# actually expands it — dev targets (test, lint, ...) keep working
# on machines without docker/podman.
_RUNTIME_BIN := $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null)
RUNTIME = $(or $(_RUNTIME_BIN),$(error No container runtime found: install docker or podman))

# Auto-detect GPU flags at parse time (same precedence as a shell if-chain):
#   NVIDIA driver present → --gpus all
#   AMD ROCm (/dev/kfd)   → --device=/dev/kfd --device=/dev/dri
#   DRI only (APU/iGPU)   → --device=/dev/dri (for EGL/GL rendering)
#   none of the above     → empty (CPU-only)
ifneq ($(shell command -v nvidia-smi 2>/dev/null),)
  GPU_FLAGS := --gpus all
else ifneq ($(wildcard /dev/kfd),)
  GPU_FLAGS := --device=/dev/kfd --device=/dev/dri
else ifneq ($(wildcard /dev/dri),)
  GPU_FLAGS := --device=/dev/dri
else
  GPU_FLAGS :=
endif

# Build args — override for different PyTorch variants:
#   make build TORCH_INDEX=https://download.pytorch.org/whl/rocm6.2
#   make build TORCH_INDEX=https://download.pytorch.org/whl/cpu
TORCH_INDEX ?= https://download.pytorch.org/whl/cu124

# -----------------------------------------------------------------------
# Container targets
# -----------------------------------------------------------------------

.PHONY: build run run-pipeline stop shell clean

# Build the container image, baking in the chosen PyTorch wheel index.
build:
	$(RUNTIME) build \
		--build-arg TORCH_INDEX=$(TORCH_INDEX) \
		-t $(IMAGE) .

# Start the server container, first force-removing any leftover container
# of the same name (the leading '-' tolerates "no such container").
run:
	-$(RUNTIME) rm -f $(CONTAINER) 2>/dev/null
	$(RUNTIME) run --rm --name $(CONTAINER) $(GPU_FLAGS) \
		-v $(VOLUME):/data \
		-p $(PORT):8000 \
		$(IMAGE)

# Run a pipeline JSON inside the container (no server, just execute).
# Override with: make run-pipeline PIPELINE=examples/other.json
PIPELINE ?= examples/mnist_train.json

run-pipeline:
	$(RUNTIME) run --rm \
		$(GPU_FLAGS) \
		-v $(VOLUME):/data \
		-v $(CURDIR)/examples:/app/examples \
		$(IMAGE) run $(PIPELINE)
# $(CURDIR) is a make builtin that is always the absolute working directory;
# $(PWD) is merely inherited from the environment and can be unset or stale
# (e.g. under `sudo make` or some CI shells), which would silently mount "/examples".

# Stop the server container started by `make run`; '-' tolerates it
# not currently running.
stop:
	-$(RUNTIME) stop $(CONTAINER)

# Interactive bash inside the image with the same GPU/port/volume wiring
# as `run` — for debugging the container environment.
shell:
	$(RUNTIME) run --rm -it --entrypoint bash \
		$(GPU_FLAGS) \
		-v $(VOLUME):/data \
		-p $(PORT):8000 \
		$(IMAGE)

# -----------------------------------------------------------------------
# Local development (no container)
# -----------------------------------------------------------------------

.PHONY: dev dev-backend dev-frontend test test-all lint format check venv serve

# Create a local virtualenv and install the project in editable mode
# with all extras plus dev tooling.
venv:
	python3 -m venv .venv
	.venv/bin/pip install -e ".[all,dev]"

# Launch the installed CLI's server from the virtualenv made by `make venv`.
serve:
	.venv/bin/trainything serve

# Backend dev server with auto-reload, bound to localhost (no container).
dev-backend:
	uvicorn server.main:app --reload --host 127.0.0.1 --port 8000

# Frontend dev server (npm) — run in a second terminal alongside dev-backend.
dev-frontend:
	cd frontend && npm run dev

# `dev` is a signpost, not a runner: both dev servers are long-running
# foreground processes, so they belong in separate terminals.
dev:
	@printf 'Run in two terminals:\n  make dev-backend\n  make dev-frontend\n'

# Fast tests only: skip anything marked slow (e.g. data downloads).
test:
	pytest -v -m "not slow"

# Full test suite, including slow/download-heavy tests.
test-all:
	pytest -v

# Lint Python (ruff) and type-check the frontend (tsc emits no files).
lint:
	ruff check server/ tests/
	cd frontend && npx tsc --noEmit

# Auto-format Python and frontend sources; `--fix` also applies safe
# ruff autofixes on top of formatting.
format:
	ruff format server/ tests/
	ruff check --fix server/ tests/
	cd frontend && npx prettier --write "src/**/*.{ts,tsx,css}"

# Validate all example pipelines (dry-run), then lint and fast tests.
# The `exit 1` inside the loop aborts the recipe line's shell, failing make
# on the first broken pipeline.
check:
	@echo "=== Dry-run all examples ==="
	@for pipeline in examples/*.json; do \
		echo "  $$pipeline"; \
		python -m server.main run --dry-run "$$pipeline" || exit 1; \
	done
	@echo ""
	@echo "=== Lint ==="
	ruff check server/ tests/
	@echo ""
	@echo "=== Tests ==="
	pytest -v -m "not slow"
	@echo ""
	@echo "All checks passed."

# Remove the built image AND the data volume — this destroys any datasets
# or checkpoints stored in $(VOLUME). '-' keeps going if either is absent.
clean:
	-$(RUNTIME) rmi $(IMAGE)
	-$(RUNTIME) volume rm $(VOLUME)
