diff --git a/.woodpecker/drift-detection.yml b/.woodpecker/drift-detection.yml
index f1d491a4..4727ae5c 100644
--- a/.woodpecker/drift-detection.yml
+++ b/.woodpecker/drift-detection.yml
@@ -42,10 +42,15 @@ steps:
         -d "{\"role\":\"ci\",\"jwt\":\"$SA_TOKEN\"}" | jq -r .auth.client_token)

       # ── Run terraform plan on all stacks ──
+      # Emits a first-seen timestamp and a precomputed age per drifted stack;
+      # Prometheus can also derive the age via `time() - drift_stack_first_seen`.
       - |
        DRIFTED=""
        CLEAN=0
        ERRORS=""
+       NOW=$(date +%s)
+       # Metrics accumulator — written once per stack, then pushed as a batch.
+       METRICS=""

        for stack_dir in stacks/*/; do
          stack=$(basename "$stack_dir")
@@ -56,12 +61,60 @@ steps:
          EXIT=$?

          case $EXIT in
-           0) echo "OK (no changes)"; CLEAN=$((CLEAN + 1)) ;;
-           1) echo "ERROR"; ERRORS="$ERRORS $stack" ;;
-           2) echo "DRIFT DETECTED"; DRIFTED="$DRIFTED $stack" ;;
+           0)
+             echo "OK (no changes)"
+             CLEAN=$((CLEAN + 1))
+             # drift_stack_state=0 means clean; the age is irrelevant, but we
+             # still push 0 so the per-stack gauges don't go stale.
+             METRICS="${METRICS}drift_stack_state{stack=\"$stack\"} 0\n"
+             METRICS="${METRICS}drift_stack_age_hours{stack=\"$stack\"} 0\n"
+             ;;
+           1)
+             echo "ERROR"
+             ERRORS="$ERRORS $stack"
+             # No age metric on error: drift status for this stack is unknown.
+             METRICS="${METRICS}drift_stack_state{stack=\"$stack\"} 2\n"
+             ;;
+           2)
+             echo "DRIFT DETECTED"
+             DRIFTED="$DRIFTED $stack"
+             # Fetch the first-seen timestamp from the Pushgateway so it is
+             # preserved across runs. NB: the Pushgateway re-exposes pushed
+             # series with its grouping labels (job, instance) added, and
+             # values in scientific notation, so match the stack label as a
+             # substring and let awk print the value as a plain integer.
+             FIRST_SEEN=$(curl -s "http://prometheus-prometheus-pushgateway.monitoring:9091/metrics" \
+               | awk -v s="$stack" 'index($1, "drift_stack_first_seen{") == 1 && index($1, "stack=\"" s "\"") {printf "%.0f", $2; exit}')
+             if [ -z "$FIRST_SEEN" ] || [ "$FIRST_SEEN" = "0" ]; then
+               FIRST_SEEN="$NOW"
+             fi
+             AGE_HOURS=$(( (NOW - FIRST_SEEN) / 3600 ))
+             METRICS="${METRICS}drift_stack_state{stack=\"$stack\"} 1\n"
+             METRICS="${METRICS}drift_stack_first_seen{stack=\"$stack\"} $FIRST_SEEN\n"
+             METRICS="${METRICS}drift_stack_age_hours{stack=\"$stack\"} $AGE_HOURS\n"
+             ;;
          esac
        done

+       # Summary gauges — one sample per run.
+       DRIFT_COUNT=$(echo "$DRIFTED" | wc -w)
+       ERROR_COUNT=$(echo "$ERRORS" | wc -w)
+       METRICS="${METRICS}drift_stack_count $DRIFT_COUNT\n"
+       METRICS="${METRICS}drift_error_count $ERROR_COUNT\n"
+       METRICS="${METRICS}drift_clean_count $CLEAN\n"
+       METRICS="${METRICS}drift_detection_last_run_timestamp $NOW\n"
+
+       # ── Push to Pushgateway ──
+       # One batched push keeps the run atomic: either all metrics land or
+       # none. `--fail` turns an HTTP error into a non-zero curl exit so the
+       # fallback message actually fires.
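+       # Note on Pushgateway semantics: a POST replaces every existing series
+       # that shares a metric name with the pushed batch, so per-stack series
+       # for renamed or deleted stacks are swept away on the next push.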
+       printf "%b" "$METRICS" | curl -s --fail --data-binary @- \
+         http://prometheus-prometheus-pushgateway.monitoring:9091/metrics/job/drift-detection \
+         || echo "(pushgateway unavailable, metrics lost for this run)"
+
        echo ""
        echo "=== Drift Detection Summary ==="
        echo "Clean: $CLEAN stacks"
diff --git a/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl b/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl
index 5188c1ca..e08e803f 100755
--- a/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl
+++ b/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl
@@ -1787,6 +1787,30 @@ serverFiles:
              severity: warning
            annotations:
              summary: "Privatebin has no available replicas"
+          - alert: DawarichIngestionStale
+            expr: (time() - dawarich_last_point_ingested_timestamp{user="viktor"}) > 172800
+            for: 15m
+            labels:
+              severity: warning
+            annotations:
+              summary: "Dawarich: no points from viktor in >2 days"
+              description: "The iOS Dawarich app likely stopped sending location points. Open the app, verify it's running, and check background location permissions. Server-side is healthy when this alert fires — the issue is client-side."
+          - alert: DawarichIngestionMonitorStale
+            expr: (time() - dawarich_ingestion_monitor_last_push_timestamp{user="viktor"}) > 129600
+            for: 15m
+            labels:
+              severity: warning
+            annotations:
+              summary: "Dawarich ingestion freshness monitor hasn't pushed in >36h"
+              description: "The ingestion-freshness-monitor CronJob in the dawarich namespace isn't running, or its Jobs are failing. Check `kubectl -n dawarich get cronjob ingestion-freshness-monitor` and recent Job logs."
+          - alert: DawarichIngestionMonitorNeverRun
+            expr: absent(dawarich_ingestion_monitor_last_push_timestamp{user="viktor"})
+            for: 2h
+            labels:
+              severity: warning
+            annotations:
+              summary: "Dawarich ingestion freshness monitor has never pushed"
+              description: "Expected `dawarich_ingestion_monitor_last_push_timestamp` to appear once the daily CronJob runs. Check the CronJob in the dawarich namespace."
        - name: "Network Traffic (GoFlow2)"
          rules:
            - alert: GoFlow2Down
@@ -1939,6 +1963,38 @@ serverFiles:
              severity: warning
            annotations:
              summary: "Authentik outpost restarted {{ $value | printf \"%.0f\" }} times in 30m — check for OOM or crash loop"
+        - name: Infrastructure Drift
+          # Metrics pushed by .woodpecker/drift-detection.yml after each cron run.
+          # See Wave 7 of the state-drift consolidation plan.
+          rules:
+            - alert: DriftDetectionStale
+              # Drift detection hasn't reported in 26h. Either the cron
+              # didn't fire, or the job is failing before the push step.
+              expr: time() - max(drift_detection_last_run_timestamp) > 26 * 3600
+              for: 30m
+              labels:
+                severity: warning
+              annotations:
+                summary: "Drift detection hasn't reported in {{ $value | humanizeDuration }} — check Woodpecker pipeline 'drift-detection'"
+            - alert: DriftUnaddressed
+              # A stack has drifted for >72h without reconciliation: either run
+              # apply to realign live resources with the HCL, or update the HCL to accept the live state.
+              expr: max(drift_stack_age_hours) > 72
+              for: 1h
+              labels:
+                severity: warning
+              annotations:
+                summary: "A stack has been drifted for {{ $value | printf \"%.0f\" }}h — run scripts/tg plan across stacks to identify and reconcile"
+            - alert: DriftStacksMany
+              # More than 10 stacks drifting simultaneously usually means a
+              # systemic issue (cluster upgrade, new admission controller,
+              # provider version bump) rather than individual misconfigurations.
+              expr: drift_stack_count > 10
+              for: 30m
+              labels:
+                severity: warning
+              annotations:
+                summary: "{{ $value | printf \"%.0f\" }} stacks drifting — likely a systemic cause (new admission webhook, provider upgrade). Check the most recent drift-detection run in Woodpecker."
 extraScrapeConfigs: |
   - job_name: 'proxmox-host'