End of forgejo-registry-consolidation. With Phases 0 and 1 already
landed (Forgejo ready, dual-push CI, integrity probe, retention CronJob,
images migrated via forgejo-migrate-orphan-images.sh), this commit
flips everything off registry.viktorbarzin.me onto Forgejo and
removes the legacy infrastructure.
Phase 3 — image= flips:
* infra/stacks/{payslip-ingest,job-hunter,claude-agent-service,
fire-planner,freedify/factory,chrome-service,beads-server}/main.tf
— image= now points to forgejo.viktorbarzin.me/viktor/<name>
(spot-check sketch after this list).
* infra/stacks/claude-memory/main.tf — also moved off DockerHub
(viktorbarzin/claude-memory-mcp:17 → forgejo.viktorbarzin.me/viktor/...).
* infra/.woodpecker/{default,drift-detection}.yml — the infra-ci image
is now pulled from Forgejo. build-ci-image.yml still dual-pushes until
the next build cycle confirms Forgejo as canonical.
* /home/wizard/code/CLAUDE.md — claude-memory-mcp install URL updated.
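
A quick spot-check for any of the flipped references is to pull the
image straight from Forgejo before the next apply. Minimal sketch,
assuming `docker login forgejo.viktorbarzin.me` has already been done
with the remaining Forgejo credentials; the image name and tag below
are placeholders, not the exact references used in the stacks:

    # hypothetical spot-check (name and tag are placeholders)
    docker pull forgejo.viktorbarzin.me/viktor/payslip-ingest:latest \
      && echo "pull from Forgejo OK"
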
Phase 4 — decommission registry-private:
* registry-credentials Secret: dropped the registry.viktorbarzin.me /
registry.viktorbarzin.me:5050 / 10.0.20.10:5050 auths entries; the
Forgejo entry is the only one left (verification sketch after this
list).
* infra/stacks/infra/main.tf cloud-init: dropped the containerd
hosts.toml entries for registry.viktorbarzin.me and 10.0.20.10:5050.
(Existing nodes already had the file removed during the manual
`setup-forgejo-containerd-mirror.sh` rollout; the cloud-init template
only fires when a new VM is provisioned.)
* infra/modules/docker-registry/docker-compose.yml: registry-private
service block removed; nginx 5050 port mapping dropped. Pull-
through caches for upstream registries (5000/5010/5020/5030/5040)
stay on the VM permanently.
* infra/modules/docker-registry/nginx_registry.conf: upstream
`private` block + port 5050 server block removed.
* infra/stacks/monitoring/modules/monitoring/main.tf:
registry_integrity_probe + registry_probe_credentials resources
stripped. forgejo_integrity_probe is the only manifest probe now.
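
For the Secret trim above, a minimal verification sketch. The
`.dockerconfigjson` key and the (omitted) namespace are assumptions;
add `-n` for wherever registry-credentials actually lives:

    # check that only the Forgejo auth survived the trim
    kubectl get secret registry-credentials \
      -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d | jq '.auths | keys'
    # expected: ["forgejo.viktorbarzin.me"]
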
Phase 5 — final docs sweep:
* infra/docs/runbooks/registry-vm.md — VM scope reduced to pull-
through caches; forgejo-registry-breakglass.md cross-ref added.
* infra/docs/architecture/ci-cd.md — registry component table +
diagram now reflect Forgejo. Pre-migration root-cause sentence
preserved as historical context with a pointer to the design doc.
* infra/docs/architecture/monitoring.md — Registry Integrity Probe
row updated to point at the Forgejo probe.
* infra/.claude/CLAUDE.md — Private registry section rewritten end-
to-end (auth, retention, integrity, where the bake came from).
* prometheus_chart_values.tpl — RegistryManifestIntegrityFailure
alert annotation simplified now that only one registry is in
scope.
Operational follow-up (cannot be done from a TF apply):
1. ssh root@10.0.20.10 — edit /opt/registry/docker-compose.yml to
match the new template AND run `docker compose up -d --remove-orphans`
to actually stop the registry-private container (sketched after this
list). Memory id=1078 confirms cloud-init won't redeploy on a TF
apply alone.
2. After 1 week of no incidents, `rm -rf /opt/registry/data/private/`
on the VM (~2.6GB freed).
3. Open the dual-push step in build-ci-image.yml and drop
registry.viktorbarzin.me:5050 from the `repo:` list. At that point the
post-push integrity check at lines 33-107 also needs to be repointed
at Forgejo or removed (the per-build verify is redundant with the
Forgejo probe that runs every 15 minutes).
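
Condensed as a shell sketch, the VM-side part of the follow-up could
look like the below; the compose edit itself still has to be made by
hand to match the new template, and the data path comes from step 2:

    # step 1: restart the stack without the registry-private container
    ssh root@10.0.20.10 <<'EOF'
    cd /opt/registry
    # after editing docker-compose.yml to match the new template:
    docker compose up -d --remove-orphans
    docker ps --filter name=registry-private   # expect no output
    EOF

    # step 2, after a quiet week: reclaim the ~2.6GB of private data
    ssh root@10.0.20.10 'rm -rf /opt/registry/data/private/'
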
Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
infra/.woodpecker/drift-detection.yml (123 lines, 5 KiB, YAML):
# Daily drift detection — runs terraform plan on all stacks and alerts on drift.
# Triggered by Woodpecker cron schedule "drift-detection" (must be registered in Woodpecker UI/API).

when:
  event: cron
  cron: drift-detection

clone:
  git:
    image: woodpeckerci/plugin-git
    settings:
      depth: 1
      attempts: 3

steps:
  - name: detect-drift
    image: forgejo.viktorbarzin.me/viktor/infra-ci:latest
    pull: true
    backend_options:
      kubernetes:
        resources:
          requests:
            memory: 2Gi
          limits:
            memory: 4Gi
    environment:
      SLACK_WEBHOOK:
        from_secret: slack_webhook
    commands:
      # ── git-crypt unlock ──
      - |
        SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
        curl -sk "https://10.0.20.100:6443/api/v1/namespaces/woodpecker/configmaps/git-crypt-key" \
          -H "Authorization:Bearer $SA_TOKEN" | jq -r .data.key | base64 -d > /tmp/key
        git-crypt unlock /tmp/key && rm /tmp/key

      # ── Vault auth ──
      - |
        SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
        export VAULT_ADDR=http://vault-active.vault.svc.cluster.local:8200
        export VAULT_TOKEN=$(curl -s -X POST "$VAULT_ADDR/v1/auth/kubernetes/login" \
          -d "{\"role\":\"ci\",\"jwt\":\"$SA_TOKEN\"}" | jq -r .auth.client_token)

      # ── Run terraform plan on all stacks ──
      # Emits two timestamps per drifted stack so the Pushgateway/Prometheus
      # side can compute drift-age-hours via `time() - drift_stack_first_seen`.
      - |
        DRIFTED=""
        CLEAN=0
        ERRORS=""
        NOW=$(date +%s)
        # Metrics accumulator — written once per stack, then pushed as a batch.
        METRICS=""

        for stack_dir in stacks/*/; do
          stack=$(basename "$stack_dir")
          [ -f "$stack_dir/terragrunt.hcl" ] || continue

          echo -n "[$stack] planning... "
          OUTPUT=$(cd "$stack_dir" && terragrunt plan -detailed-exitcode -input=false 2>&1)
          EXIT=$?

          case $EXIT in
            0)
              echo "OK (no changes)"
              CLEAN=$((CLEAN + 1))
              # drift_stack_state=0 means clean; age-hours irrelevant so we
              # still push 0 so per-stack gauges don't go stale.
              METRICS="${METRICS}drift_stack_state{stack=\"$stack\"} 0\n"
              METRICS="${METRICS}drift_stack_age_hours{stack=\"$stack\"} 0\n"
              ;;
            1)
              echo "ERROR"
              ERRORS="$ERRORS $stack"
              METRICS="${METRICS}drift_stack_state{stack=\"$stack\"} 2\n"
              ;;
            2)
              echo "DRIFT DETECTED"
              DRIFTED="$DRIFTED $stack"
              # Fetch first-seen timestamp from Pushgateway (preserve across runs).
              FIRST_SEEN=$(curl -s "http://prometheus-prometheus-pushgateway.monitoring:9091/metrics" \
                | awk -v s="$stack" '$1 == "drift_stack_first_seen{stack=\""s"\"}" {print $2; exit}')
              if [ -z "$FIRST_SEEN" ] || [ "$FIRST_SEEN" = "0" ]; then
                FIRST_SEEN="$NOW"
              fi
              AGE_HOURS=$(( (NOW - FIRST_SEEN) / 3600 ))
              METRICS="${METRICS}drift_stack_state{stack=\"$stack\"} 1\n"
              METRICS="${METRICS}drift_stack_first_seen{stack=\"$stack\"} $FIRST_SEEN\n"
              METRICS="${METRICS}drift_stack_age_hours{stack=\"$stack\"} $AGE_HOURS\n"
              ;;
          esac
        done

        # Summary counters — single gauge per run.
        DRIFT_COUNT=$(echo "$DRIFTED" | wc -w)
        ERROR_COUNT=$(echo "$ERRORS" | wc -w)
        METRICS="${METRICS}drift_stack_count $DRIFT_COUNT\n"
        METRICS="${METRICS}drift_error_count $ERROR_COUNT\n"
        METRICS="${METRICS}drift_clean_count $CLEAN\n"
        METRICS="${METRICS}drift_detection_last_run_timestamp $NOW\n"

        # ── Push to Pushgateway ──
        # One batched push keeps the run atomic: either all metrics land or none.
        printf "%b" "$METRICS" | curl -s --data-binary @- \
          http://prometheus-prometheus-pushgateway.monitoring:9091/metrics/job/drift-detection \
          || echo "(pushgateway unavailable, metrics lost for this run)"

        echo ""
        echo "=== Drift Detection Summary ==="
        echo "Clean: $CLEAN stacks"
        echo "Drift: ${DRIFTED:-none}"
        echo "Errors: ${ERRORS:-none}"

        # ── Slack alert if drift found ──
        if [ -n "$DRIFTED" ]; then
          curl -s -X POST -H 'Content-type: application/json' \
            --data "{\"channel\":\"general\",\"text\":\":warning: Drift detected in:${DRIFTED}\nClean: ${CLEAN} stacks. Errors:${ERRORS:-none}\"}" \
            "$SLACK_WEBHOOK" || true
        else
          curl -s -X POST -H 'Content-type: application/json' \
            --data "{\"channel\":\"general\",\"text\":\":white_check_mark: Drift detection: all ${CLEAN} stacks clean${ERRORS:+. Errors: $ERRORS}\"}" \
            "$SLACK_WEBHOOK" || true
        fi