# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
.venv/
venv/
env/

# uv
# uv.lock IS committed — do not ignore
.uv/

# Testing / coverage
.pytest_cache/
.coverage
.coverage.*
htmlcov/
.tox/
.mypy_cache/
.ruff_cache/
.hypothesis/

# IDE
.vscode/
.idea/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db

# Numba cache
*.nbi
*.nbc

# Local Redis data
docker/redis_data/

# py-spy artifacts are transient and not committed; the only flamegraphs kept
# in the repo are the committed ones under benchmarks/results/.
*.svg.tmp

# Benchmark transient outputs (Step 16). Raw per-round timing data with
# --benchmark-save-data is multi-MB per file and machine-specific; never
# committed. The aggregated N=20 outputs (benchmarks/results/n20/*.json,
# top-level only) ARE small + headline-quality — those stay committed.
benchmarks/results/Linux-CPython-*/
benchmarks/results/.benchmarks/
benchmarks/results/regression_check.json
benchmarks/results/n20/_tmp/

# Personal build progress journal — not shared.
progress/

# Claude Code per-user settings — not shared.
.claude/

# Personal dev-environment scripts — not shared.
dev-up.sh
dev-*.sh
dev-*.cmd
dev-*.bat

# Personal session-context file for Claude Code — not shared.
CLAUDE.md

# Ad-hoc perf diagnostic scripts at repo root — not shared.
# Real benchmarks live in benchmarks/ and are committed.
bench_*.py
