#!/bin/bash
set -euo pipefail

# ============================================================================
# CI Quick Review - One-Command CI Failure Summary
# ============================================================================
# Simplified wrapper for quick CI failure reviews.
# Fetches CI data and displays a concise, colorized summary.
#
# Usage:
#   ci-quick-review 33                    # Review CI failures for PR #33
#   ci-quick-review my-branch             # Review CI failures for branch
#   ci-quick-review                       # Review CI failures for current branch
#   ci-quick-review --workflow ci-cd 33   # Filter to specific workflow
#   ci-quick-review --json 33             # JSON output only
# ============================================================================

# Resolve the directory containing this script so sibling tools
# (e.g. fetch-ci-data, invoked below) work regardless of the caller's CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Colors — ANSI-C quoting ($'...') stores the real escape bytes rather than
# the literal text "\033[...". The literal form only renders through
# `echo -e`; holding real bytes also works with `echo -e` (nothing left to
# expand) and fixes consumers that bypass it, e.g. the sed highlighting of
# error logs further down.
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
MAGENTA=$'\033[0;35m'
CYAN=$'\033[0;36m'
BOLD=$'\033[1m'
NC=$'\033[0m'

#######################################
# Print the full help text for this tool.
# Globals:   none (reads $0 for the program name)
# Arguments: none
# Outputs:   usage text to stdout
# Exits:     0 — always terminates the script; never returns
#######################################
usage() {
    cat << EOF
Usage: $(basename "$0") [OPTIONS] [PR_NUMBER|BRANCH]

Quick CI failure review with concise summary and colorized output.

OPTIONS:
    --workflow WORKFLOW_NAME    Filter to specific workflow
    --json                      Output JSON only (no formatted display)
    --no-cache                  Bypass cache and fetch fresh data
    --help, -h                  Show this help

ARGUMENTS:
    PR_NUMBER                   Pull request number (e.g., 33)
    BRANCH                      Branch name (e.g., 'my-feature-branch')
    (none)                      Use current branch

EXAMPLES:
    # Quick review of current branch
    $(basename "$0")

    # Review specific PR
    $(basename "$0") 33

    # Review specific branch
    $(basename "$0") my-feature-branch

    # Filter to specific workflow
    $(basename "$0") --workflow ci-cd 33

    # JSON output for scripting
    $(basename "$0") --json 33 | jq '.summary'

EXIT CODES:
    0 - Success (CI data fetched)
    1 - Error (dependency missing, API failure, etc.)
    2 - No CI failures found (success!)
EOF
    exit 0
}

# Argument parsing: peel off the flags this wrapper understands (--json,
# --help); every other token is forwarded verbatim to fetch-ci-data
# (PR number, branch name, --workflow, --no-cache, ...).
JSON_MODE=false
FETCH_ARGS=()

while [[ $# -gt 0 ]]; do
    case "$1" in
        -h|--help)
            usage
            ;;
        --json)
            JSON_MODE=true
            shift
            ;;
        *)
            # Pass-through argument for the fetcher.
            FETCH_ARGS+=("$1")
            shift
            ;;
    esac
done

# Fetch CI data - run once and capture both stdout and stderr
CI_DATA=""
FETCH_EXIT_CODE=0
FETCH_STDERR=""

# Create temp file for stderr capture
STDERR_FILE=$(mktemp)
# Single-quoted so "$STDERR_FILE" expands when the trap fires, and the
# path survives shell-special characters.
trap 'rm -f "$STDERR_FILE"' EXIT

# Single execution: capture stdout in CI_DATA, stderr in temp file.
# NOTE: the if/else form is required — with `if ! CI_DATA=$(...)`, $? inside
# the branch is the status of the *negated* condition (0 whenever the fetch
# failed), so the real exit code would be lost and the error path below
# could never trigger.
# ${FETCH_ARGS[@]+...} expands to nothing when the array is empty, which
# keeps `set -u` happy on bash < 4.4.
if CI_DATA=$("$SCRIPT_DIR/fetch-ci-data" ${FETCH_ARGS[@]+"${FETCH_ARGS[@]}"} 2>"$STDERR_FILE"); then
    FETCH_EXIT_CODE=0
else
    FETCH_EXIT_CODE=$?
    FETCH_STDERR=$(cat "$STDERR_FILE")
fi

# If fetch failed with no data, show error from captured stderr
if [ "$FETCH_EXIT_CODE" -ne 0 ] && [ -z "$CI_DATA" ]; then
    echo -e "${RED}✗ Failed to fetch CI data${NC}" >&2
    if [ -n "$FETCH_STDERR" ]; then
        # Prefer the fetcher's own error lines; fall back to the full dump.
        echo "$FETCH_STDERR" | grep -E '✗|Error|error' >&2 || echo "$FETCH_STDERR" >&2
    fi
    exit 1
fi

# If JSON mode, just output raw JSON and propagate the fetcher's status
if [ "$JSON_MODE" = true ]; then
    echo "$CI_DATA"
    exit "$FETCH_EXIT_CODE"
fi

# Extract JSON - remove any non-JSON lines (ANSI codes, status messages).
# Keep the whole `{...}` span: the previous `| head -1` truncated
# pretty-printed (multi-line) JSON to just its opening brace, forcing the
# lossier ANSI-stripping fallback below.
CI_JSON=$(echo "$CI_DATA" | sed -n '/^{/,/^}/p')
# If that didn't work, try removing ANSI codes and filtering
if [ -z "$CI_JSON" ] || ! echo "$CI_JSON" | jq empty 2>/dev/null; then
    CI_JSON=$(echo "$CI_DATA" | sed 's/\x1b\[[0-9;]*m//g' | grep -v '^\[' | grep -v '^$' || echo "$CI_DATA")
fi

# Check if we got valid JSON
if ! echo "$CI_JSON" | jq empty 2>/dev/null; then
    echo -e "${RED}✗ Invalid JSON response from fetch-ci-data${NC}" >&2
    exit 1
fi

# Parse summary counts. Numeric fields default to 0 via `//`: a missing key
# would otherwise produce the string "null", and `[ "null" -eq 0 ]` is an
# integer-test error that aborts the script under `set -e`.
TOTAL=$(echo "$CI_JSON" | jq -r '.summary.total // 0')
CRITICAL=$(echo "$CI_JSON" | jq -r '.summary.critical // 0')
MAJOR=$(echo "$CI_JSON" | jq -r '.summary.major // 0')
MINOR=$(echo "$CI_JSON" | jq -r '.summary.minor // 0')
PR_NUMBER=$(echo "$CI_JSON" | jq -r '.pr_number')
REPO=$(echo "$CI_JSON" | jq -r '.repository')

# Display summary header
echo ""
echo -e "${BOLD}╔════════════════════════════════════════════════════════════════╗${NC}"
echo -e "${BOLD}║${NC}           ${CYAN}${BOLD}CI FAILURES SUMMARY${NC}                              ${BOLD}║${NC}"
echo -e "${BOLD}╚════════════════════════════════════════════════════════════════╝${NC}"
echo ""
echo -e "${BLUE}Repository:${NC} $REPO"
echo -e "${BLUE}PR Number:${NC} #$PR_NUMBER"
echo ""

# All green: report success and exit 2 ("no failures" — see usage())
if [ "$TOTAL" -eq 0 ]; then
    echo -e "${GREEN}${BOLD}✅ No CI failures found!${NC}"
    echo ""
    echo -e "${GREEN}All workflows passed successfully.${NC}"
    echo ""
    exit 2
fi

# Display failure counts with color coding: one line per non-empty
# severity bucket, worst first.
echo -e "${BOLD}Failure Summary:${NC}"
echo ""

for severity_bucket in critical major minor; do
    case $severity_bucket in
        critical)
            bucket_count=$CRITICAL
            bucket_line="  ${RED}${BOLD}🔴 CRITICAL:${NC} $CRITICAL failure(s) - ${RED}Must fix before merge${NC}"
            ;;
        major)
            bucket_count=$MAJOR
            bucket_line="  ${YELLOW}${BOLD}🟠 MAJOR:${NC} $MAJOR failure(s) - ${YELLOW}Should fix before merge${NC}"
            ;;
        minor)
            bucket_count=$MINOR
            bucket_line="  ${BLUE}${BOLD}🟡 MINOR:${NC} $MINOR failure(s) - ${BLUE}Nice to fix${NC}"
            ;;
    esac
    if [ "$bucket_count" -gt 0 ]; then
        echo -e "$bucket_line"
    fi
done

echo ""
echo -e "${BOLD}Total Failures:${NC} ${RED}$TOTAL${NC}"
echo ""

# ============================================================================
# Error Log Extraction Functions
# ============================================================================

#######################################
# Pull the most relevant error lines out of a GitHub Actions job log.
# Arguments:
#   $1 - job id (must be numeric; "null"/empty rejected)
#   $2 - repository slug, "owner/name"
# Outputs:  matched log excerpt on stdout
# Returns:  0 on success; 1 when the id is invalid, the log fetch fails,
#           or no error-looking lines are found
#######################################
extract_error_from_logs() {
    local job_id="$1"
    local repo_slug="$2"
    local logs excerpt

    # A usable id must be present, not the JSON literal "null", and numeric.
    case "$job_id" in
        ''|null) return 1 ;;
    esac
    [[ "$job_id" =~ ^[0-9]+$ ]] || return 1

    # Job logs via the REST API carry far more detail than `gh run view`.
    logs=$(gh api "repos/$repo_slug/actions/jobs/$job_id/logs" 2>/dev/null || echo "")
    [ -n "$logs" ] || return 1

    # Try progressively looser patterns; keep the first that matches.
    # 1) pytest-style FAILED lines (most common for Python test suites)
    excerpt=$(echo "$logs" | grep -B 3 -A 10 "FAILED.*::" | tail -20 || echo "")
    if [ -z "$excerpt" ]; then
        # 2) assertion errors, exceptions, tracebacks
        excerpt=$(echo "$logs" | grep -B 2 -A 8 -E "(AssertionError|Exception:|Error:|Traceback)" | tail -20 || echo "")
    fi
    if [ -z "$excerpt" ]; then
        # 3) GitHub Actions ##[error] markers
        excerpt=$(echo "$logs" | grep -B 1 -A 5 -E "##\[error\]|ERROR:" | tail -15 || echo "")
    fi
    if [ -z "$excerpt" ]; then
        # 4) validation errors / violations (case-insensitive)
        excerpt=$(echo "$logs" | grep -B 2 -A 5 -i "violation\|failed\|error" | tail -20 || echo "")
    fi
    if [ -z "$excerpt" ]; then
        # 5) generic catch-all (compiler / npm style errors)
        excerpt=$(echo "$logs" | grep -B 1 -A 3 -i -E "(^Error |^FAIL:|^fatal:|npm ERR!)" | tail -15 || echo "")
    fi

    [ -n "$excerpt" ] || return 1
    echo "$excerpt"
}

#######################################
# Map raw CI error text to a short human-readable diagnosis with a fix hint.
# Patterns are checked in priority order; the first match wins.
# Arguments:
#   $1 - error text extracted from job logs
# Outputs:  diagnosis lines on stdout
# Returns:  0 when a diagnosis was produced, 1 when the text contains no
#           usable content, 2 for empty input (distinguished deliberately)
#######################################
detect_error_pattern() {
    local error_text="$1"

    # Validate error_text parameter.
    # Return 2 for empty input to distinguish from "no pattern matched" (return 1)
    if [ -z "$error_text" ]; then
        return 2
    fi

    # Docker build errors
    if echo "$error_text" | grep -qi "fatal: could not read Username"; then
        echo "🔧 Missing GitHub PAT for private repos"
        echo "   Root Cause: Docker build needs GH_PAT for private GitHub dependencies"
        echo "   Fix: Pass --build-arg GH_PAT=\${{ secrets.GH_PAT }} to docker build"
        return 0
    fi

    if echo "$error_text" | grep -qi "authentication required\|Authentication failed"; then
        echo "🔧 Authentication failure"
        echo "   Root Cause: Missing or invalid credentials for private resources"
        echo "   Fix: Check GitHub tokens, Docker credentials, or API keys"
        return 0
    fi

    # Poetry/Python dependency errors
    if echo "$error_text" | grep -qi "poetry.*failed\|poetry install.*error"; then
        echo "🔧 Poetry dependency installation failed"
        echo "   Root Cause: Check pyproject.toml dependencies or lock file"
        echo "   Fix: Run 'poetry lock --no-update' or check for incompatible versions"
        return 0
    fi

    # Python module errors
    if echo "$error_text" | grep -qi "ModuleNotFoundError\|ImportError"; then
        local module
        # Portable extraction of the quoted name from "No module named '<x>'".
        # (grep -P/lookbehind is GNU-only and unavailable on macOS/BSD grep,
        # where it used to silently degrade the name to "unknown".)
        module=$(echo "$error_text" | sed -n "s/.*No module named '\([^']*\)'.*/\1/p" | head -1)
        [ -n "$module" ] || module="unknown"
        echo "🔧 Missing Python dependency: $module"
        echo "   Root Cause: Module not in requirements or not installed"
        echo "   Fix: Add '$module' to pyproject.toml dependencies"
        return 0
    fi

    # Test failures
    if echo "$error_text" | grep -qi "FAILED.*test_\|AssertionError\|pytest.*failed"; then
        echo "🔧 Test failures detected"
        echo "   Root Cause: Test assertions failing"
        echo "   Fix: Run 'pytest -v' locally to reproduce and debug"
        return 0
    fi

    # Type checking errors
    if echo "$error_text" | grep -qi "mypy.*error\|type.*error"; then
        echo "🔧 Type checking errors"
        echo "   Root Cause: Type annotations or type mismatches"
        echo "   Fix: Run 'mypy .' locally to see all type errors"
        return 0
    fi

    # Linting errors
    if echo "$error_text" | grep -qi "ruff.*found\|lint.*error"; then
        echo "🔧 Linting errors detected"
        echo "   Root Cause: Code style or lint rule violations"
        echo "   Fix: Run 'ruff check . --fix' to auto-fix"
        return 0
    fi

    # Docker service connection errors
    if echo "$error_text" | grep -qi "connection refused\|connection timeout"; then
        echo "🔧 Service connection failure"
        echo "   Root Cause: Service not ready or network issue"
        echo "   Fix: Add health checks or increase timeout in CI config"
        return 0
    fi

    # Generic error with snippet: surface the first error-looking line
    local error_line
    error_line=$(echo "$error_text" | grep -i "error\|fatal\|failed" | head -1 | sed 's/^[[:space:]]*//' | cut -c1-80)
    if [ -n "$error_line" ]; then
        echo "❌ Error: $error_line"
        return 0
    fi

    # Fallback: error text exists but no recognizable pattern found.
    # Provide first non-empty line as context instead of failing silently
    local first_line
    first_line=$(echo "$error_text" | grep -v '^[[:space:]]*$' | head -1 | sed 's/^[[:space:]]*//' | cut -c1-80)
    if [ -n "$first_line" ]; then
        echo "❓ Unrecognized error pattern. First log line:"
        echo "   $first_line"
        return 0
    fi

    # No useful content found in error text (e.g. whitespace only)
    return 1
}

# Display failed jobs grouped by workflow
echo -e "${BOLD}Failed Jobs:${NC}"
echo ""

# Pre-render the color escapes used inside the sed replacement below.
# $RED/$MAGENTA may hold the literal text "\033[..." (only `echo -e`
# interprets that), and sed output goes straight to the terminal — so the
# raw variables would print the backslash sequence verbatim instead of
# coloring. printf '%b' converts to real escape bytes (and is a no-op if
# the variables already hold real bytes).
RED_ESC=$(printf '%b' "$RED")
MAGENTA_ESC=$(printf '%b' "$MAGENTA")

# Group failures by workflow
WORKFLOWS=$(echo "$CI_JSON" | jq -r '.failures[].workflow' | sort -u)

while IFS= read -r workflow; do
    if [ -z "$workflow" ]; then
        continue
    fi

    echo -e "${CYAN}${BOLD}$workflow:${NC}"

    # Get this workflow's failures as TAB-separated fields for the reader loop
    echo "$CI_JSON" | jq -r --arg workflow "$workflow" '
        .failures[] |
        select(.workflow == $workflow) |
        "\(.severity)\t\(.workflow_id)\t\(.job_id)\t\(.job)\t\(.step)"
    ' | while IFS=$'\t' read -r severity run_id job_id job step; do
        # Color code by severity
        case $severity in
            critical)
                echo -e "    ${RED}🔴 CRITICAL:${NC} ${job} → ${step}"
                ;;
            major)
                echo -e "    ${YELLOW}🟠 MAJOR:${NC} ${job} → ${step}"
                ;;
            minor)
                echo -e "    ${BLUE}🟡 MINOR:${NC} ${job} → ${step}"
                ;;
            *)
                echo -e "    ${NC}●${NC} ${job} → ${step}"
                ;;
        esac

        # Fetch and display error logs for this failure
        if [ -n "$job_id" ] && [ "$job_id" != "null" ]; then
            # Extract errors from logs (pass job_id and repository);
            # best-effort — a fetch failure just skips the detail section
            error_logs=$(extract_error_from_logs "$job_id" "$REPO" 2>/dev/null || echo "")

            if [ -n "$error_logs" ]; then
                # Always show actual error details first (most valuable info)
                echo -e "       ${MAGENTA}Error details:${NC}"
                # Show up to 12 lines of error context, indented, with the
                # error keywords highlighted in red
                echo "$error_logs" | head -12 | sed 's/^/         /' | \
                    sed -E "s/(Error|ERROR|Fatal|FATAL|Failed|FAILED|Exception|Traceback|AssertionError|violation|VIOLATION)/${RED_ESC}\1${MAGENTA_ESC}/g"

                # Then optionally add diagnosis if pattern is recognized
                # (suppress diagnoses that only resolved to "unknown")
                diagnosis=$(detect_error_pattern "$error_logs" 2>/dev/null || echo "")
                if [ -n "$diagnosis" ] && ! echo "$diagnosis" | grep -qi "unknown"; then
                    echo ""
                    echo "$diagnosis" | while IFS= read -r line; do
                        echo -e "       ${CYAN}💡 ${line}${NC}"
                    done
                fi
            fi
        fi

        echo ""
    done

    echo ""
done <<< "$WORKFLOWS"

# Display top error patterns (extract from step names)
echo -e "${BOLD}Top Error Patterns:${NC}"
echo ""

# Get unique step names and count occurrences (top 5)
echo "$CI_JSON" | jq -r '.failures[].step' | sort | uniq -c | sort -rn | head -5 | while read -r count step; do
    echo -e "  ${MAGENTA}●${NC} ${step} ${CYAN}(${count} occurrence(s))${NC}"
done

echo ""

# Generate helpful suggestions based on error patterns
echo -e "${BOLD}💡 Suggested Actions:${NC}"
echo ""

# Count matching failures inside jq. The previous `jq ... | wc -l` counted
# *lines* of pretty-printed JSON objects rather than failures, so the
# numbers were inflated (harmless for the -gt 0 checks, but wrong counts).
HAS_TEST_FAILURES=$(echo "$CI_JSON" | jq '[.failures[] | select(.step | test("[Tt]est|pytest"))] | length')
HAS_LINT_FAILURES=$(echo "$CI_JSON" | jq '[.failures[] | select(.step | test("[Ll]int|ruff|flake8"))] | length')
HAS_TYPE_FAILURES=$(echo "$CI_JSON" | jq '[.failures[] | select(.step | test("[Tt]ype|mypy"))] | length')
HAS_BUILD_FAILURES=$(echo "$CI_JSON" | jq '[.failures[] | select(.step | test("[Bb]uild|compile"))] | length')

if [ "$HAS_TEST_FAILURES" -gt 0 ]; then
    echo -e "  ${GREEN}1.${NC} Review test failures - run tests locally to reproduce"
    echo -e "     ${CYAN}→ pytest -v tests/${NC}"
fi

if [ "$HAS_LINT_FAILURES" -gt 0 ]; then
    echo -e "  ${GREEN}2.${NC} Fix linting errors - run linter locally"
    echo -e "     ${CYAN}→ ruff check . --fix${NC}"
fi

if [ "$HAS_TYPE_FAILURES" -gt 0 ]; then
    echo -e "  ${GREEN}3.${NC} Fix type checking errors - run mypy locally"
    echo -e "     ${CYAN}→ mypy .${NC}"
fi

if [ "$HAS_BUILD_FAILURES" -gt 0 ]; then
    echo -e "  ${GREEN}4.${NC} Fix build failures - check dependencies and syntax"
    echo -e "     ${CYAN}→ Check requirements.txt and package.json${NC}"
fi

# Always offer the direct link to the checks page
echo -e "  ${GREEN}5.${NC} View detailed logs on GitHub Actions"
echo -e "     ${CYAN}→ https://github.com/$REPO/pull/$PR_NUMBER/checks${NC}"

echo ""

# Display footer
echo -e "${BOLD}─────────────────────────────────────────────────────────────────${NC}"

# Merge readiness verdict: buckets checked from worst to best; build the
# line first, then print it between the rules.
if [ "$CRITICAL" -gt 0 ]; then
    verdict="${RED}${BOLD}❌ NOT READY TO MERGE${NC} - Critical failures must be fixed"
elif [ "$MAJOR" -gt 0 ]; then
    verdict="${YELLOW}${BOLD}⚠️  REVIEW NEEDED${NC} - Major failures should be addressed"
elif [ "$MINOR" -gt 0 ]; then
    verdict="${BLUE}${BOLD}✓ READY TO MERGE${NC} - Minor issues can be addressed later"
else
    verdict="${GREEN}${BOLD}✅ READY TO MERGE${NC} - All checks passed"
fi
echo -e "$verdict"

echo -e "${BOLD}─────────────────────────────────────────────────────────────────${NC}"
echo ""

# Exit non-zero only for critical/major failures; minor-only is a pass.
if [ "$CRITICAL" -gt 0 ] || [ "$MAJOR" -gt 0 ]; then
    exit 1
fi
exit 0
