variable "tls_secret_name" {
  type      = string
  sensitive = true
}

data "vault_kv_secret_v2" "secrets" {
  mount = "secret"
  name  = "webhook-handler"
}

resource "kubernetes_namespace" "webhook-handler" {
  metadata {
    name = "webhook-handler"
    labels = {
      tier = local.tiers.aux
    }
  }
[infra] Suppress Goldilocks vpa-update-mode label drift on all namespaces [ci skip]
## Context
Wave 3B-continued: the Goldilocks VPA dashboard (stacks/vpa) runs a Kyverno
ClusterPolicy `goldilocks-vpa-auto-mode` that mutates every namespace with
`metadata.labels["goldilocks.fairwinds.com/vpa-update-mode"] = "off"`. This
is intentional — Terraform owns container resource limits, and Goldilocks
should only provide recommendations, never auto-update. The label is how
Goldilocks decides per-namespace whether to run its VPA in `off` mode.
Effect on Terraform: every `kubernetes_namespace` resource shows the label
as pending-removal (`-> null`) on every `scripts/tg plan`. Dawarich survey
2026-04-18 confirmed the drift. Cluster-side count: 88 namespaces carry the
label (`kubectl get ns -o json | jq ... | wc -l`). Every TF-managed namespace
is affected.
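For illustration, a rough Python equivalent of the survey count (hypothetical
helper, not the jq one-liner actually used; assumes kubectl access to the
cluster):
```python
#!/usr/bin/env python3
"""Count namespaces carrying the Goldilocks label (hypothetical helper)."""
import json
import subprocess

LABEL = "goldilocks.fairwinds.com/vpa-update-mode"

raw = subprocess.run(
    ["kubectl", "get", "ns", "-o", "json"],
    capture_output=True, text=True, check=True,
).stdout
labelled = [
    ns["metadata"]["name"]
    for ns in json.loads(raw)["items"]
    if LABEL in ns["metadata"].get("labels", {})
]
print(len(labelled))  # 88 at survey time
```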
This commit brings the intentional admission drift under the same
`# KYVERNO_LIFECYCLE_V1` discoverability marker introduced in c9d221d5 for
the ndots dns_config pattern. The marker now stands generically for any
Kyverno admission-webhook drift suppression; the inline comment records
which specific policy stamps which specific field so future grep audits
show why each suppression exists.
## This change
107 `.tf` files touched — every stack's `resource "kubernetes_namespace"`
block gets:
```hcl
lifecycle {
  # KYVERNO_LIFECYCLE_V1: goldilocks-vpa-auto-mode ClusterPolicy stamps this label on every namespace
  ignore_changes = [metadata[0].labels["goldilocks.fairwinds.com/vpa-update-mode"]]
}
```
Injection was done with a brace-depth-tracking Python pass (`/tmp/add_goldilocks_ignore.py`):
match `^resource "kubernetes_namespace" ` → track `{` / `}` until the
outermost closing brace → insert the lifecycle block before the closing
brace. The script is idempotent (skips any file that already mentions
`goldilocks.fairwinds.com/vpa-update-mode`) so re-running is safe.
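The script itself is not committed; a minimal sketch of the brace-depth pass
under the assumptions above (the real `/tmp/add_goldilocks_ignore.py` may
differ in details):
```python
#!/usr/bin/env python3
"""Sketch of the brace-depth lifecycle injector (illustrative only)."""
import re
import sys

MARKER = "goldilocks.fairwinds.com/vpa-update-mode"
LIFECYCLE = """  lifecycle {
    # KYVERNO_LIFECYCLE_V1: goldilocks-vpa-auto-mode ClusterPolicy stamps this label on every namespace
    ignore_changes = [metadata[0].labels["goldilocks.fairwinds.com/vpa-update-mode"]]
  }
"""

def inject(path):
    lines = open(path).readlines()
    if any(MARKER in line for line in lines):
        return  # idempotency: file already mentions the label, skip
    out, depth, in_ns = [], 0, False
    for line in lines:
        if re.match(r'^resource "kubernetes_namespace" ', line):
            in_ns = True
        if in_ns:
            # Naive brace tracking: ignores braces inside strings/comments.
            depth += line.count("{") - line.count("}")
            if depth == 0 and "}" in line:
                out.append(LIFECYCLE)  # insert before the outermost closing brace
                in_ns = False
        out.append(line)
    with open(path, "w") as f:
        f.writelines(out)

for tf in sys.argv[1:]:
    inject(tf)
```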
Vault stack picked up 2 namespaces in the same file (k8s-users produces
one, plus a second explicit ns) — confirmed via file diff (+8 lines).
## What is NOT in this change
- `stacks/trading-bot/main.tf` — entire file is `/* … */` commented out
(paused 2026-04-06 per user decision). Reverted after the script ran.
- `stacks/_template/main.tf.example` — per-stack skeleton, intentionally
minimal. User keeps it that way. Not touched by the script (file
has no real `resource "kubernetes_namespace"` — only a placeholder
comment).
- `.terraform/` copies (e.g. `stacks/metallb/.terraform/modules/...`) —
gitignored, won't commit; the live path was edited.
- `terraform fmt` cleanup of adjacent pre-existing alignment issues in
authentik, freedify, hermes-agent, nvidia, vault, meshcentral. Reverted
to keep the commit scoped to the Goldilocks sweep. Those files will
need a separate fmt-only commit or will be cleaned up on next real
apply to that stack.
## Verification
Dawarich (one of the hundred-plus touched stacks) showed the pattern
before and after:
```
$ cd stacks/dawarich && ../../scripts/tg plan
Before:
Plan: 0 to add, 2 to change, 0 to destroy.
# kubernetes_namespace.dawarich will be updated in-place
(goldilocks.fairwinds.com/vpa-update-mode -> null)
# module.tls_secret.kubernetes_secret.tls_secret will be updated in-place
(Kyverno generate.* labels — fixed in 8d94688d)
After:
No changes. Your infrastructure matches the configuration.
```
Injection count check:
```
$ rg -c 'KYVERNO_LIFECYCLE_V1: goldilocks-vpa-auto-mode' stacks/ | awk -F: '{s+=$2} END {print s}'
108
```
## Reproduce locally
1. `git pull`
2. Pick any stack: `cd stacks/<name> && ../../scripts/tg plan`
3. Expect: no drift on the namespace's goldilocks.fairwinds.com/vpa-update-mode label.
Closes: code-dwx
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-18 21:15:27 +00:00
  lifecycle {
    # KYVERNO_LIFECYCLE_V1: goldilocks-vpa-auto-mode ClusterPolicy stamps this label on every namespace
    ignore_changes = [metadata[0].labels["goldilocks.fairwinds.com/vpa-update-mode"]]
  }
}
module "tls_secret" {
  source          = "../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.webhook-handler.metadata[0].name
  tls_secret_name = var.tls_secret_name
}
resource "kubernetes_cluster_role" "deployment_updater" {
  metadata {
    name = "deployment-updater"
  }

  rule {
    verbs      = ["create", "update", "get", "patch", "list"]
    api_groups = ["extensions", "apps", ""]
    resources  = ["deployments", "namespaces", "pods", "services"]
  }
}
resource "kubernetes_cluster_role_binding" "update_deployment_binding" {
  metadata {
    name = "update-deployment-binding"
  }

  subject {
    kind      = "ServiceAccount"
    name      = "default"
    namespace = kubernetes_namespace.webhook-handler.metadata[0].name
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "deployment-updater"
  }
}
resource "kubernetes_secret" "ssh-key" {
  metadata {
    name      = "ssh-key"
    namespace = kubernetes_namespace.webhook-handler.metadata[0].name

    annotations = {
      "reloader.stakater.com/match" = "true"
    }
  }

  data = {
    "id_rsa" = data.vault_kv_secret_v2.secrets.data["ssh_key"]
  }

  type = "Opaque" # "generic" is a kubectl subcommand, not a Secret type; it creates type Opaque
}
resource "kubernetes_deployment" "webhook_handler" {
  metadata {
    name      = "webhook-handler"
    namespace = kubernetes_namespace.webhook-handler.metadata[0].name
    labels = {
      app  = "webhook-handler"
      tier = local.tiers.aux
    }

    annotations = {
migrate consuming stacks to ESO + remove k8s-dashboard static token
Phase 9: ExternalSecret migration across 26 stacks:
Fully migrated (vault data source removed, ESO delivers secrets):
- speedtest, shadowsocks, wealthfolio, plotting-book, f1-stream, tandoor
- n8n, dawarich, diun, netbox, onlyoffice, tuya-bridge
- hackmd (ESO template for DB URL), health (ESO template for DB URL)
- trading-bot (ESO template for DATABASE_URL + 7 secret env vars)
- forgejo (removed unused vault data source)
Partially migrated (vault kept for plan-time, ESO added for runtime):
- immich, linkwarden, nextcloud, paperless-ngx (jsondecode for homepage)
- claude-memory, rybbit, url, webhook_handler (plan-time in locals/jobs)
- woodpecker, openclaw, resume (plan-time in helm values/jobs/modules)
17 stacks unchanged (all plan-time: homepage annotations, configmaps,
module inputs) — vault data source works with OIDC auth.
Phase 17a: Remove k8s-dashboard static admin token secret.
Users now get tokens via: vault write kubernetes/creds/dashboard-admin
2026-03-15 19:05:04 +00:00
      "reloader.stakater.com/auto" = "true"
    }
  }

  spec {
    replicas = 1

    selector {
      match_labels = {
        app = "webhook-handler"
      }
    }

    template {
      metadata {
        labels = {
          app = "webhook-handler"
        }
      }

      spec {
        container {
          # security_context {
          #   run_as_user = 1000
          # }
          # lifecycle {
          #   post_start {
          #     exec {
          #       # Must be kept in sync with the webhook_handler Dockerfile
          #       command = ["echo", "\"$SSH_KEY\"", ">", "/opt/id_rsa", "&&", "chown", "appuser", "/opt/id_rsa", "&&", "chmod", "600", "/opt/id_rsa"]
          #     }
          #   }
          # }
          image = "viktorbarzin/webhook-handler:latest"
          name  = "webhook-handler"

          resources {
            limits = {
right-size memory: set requests=limits based on actual usage
- Set memory requests = limits across 56 stacks to prevent overcommit
- Right-sized limits based on actual pod usage (2x actual, rounded up)
- Scaled down trading-bot (replicas=0) to free memory
- Fixed OOMKilled services: forgejo, dawarich, health, meshcentral,
paperless-ngx, vault auto-unseal, rybbit, whisper, openclaw, clickhouse
- Added startup+liveness probes to calibre-web
- Bumped inotify limits on nodes 2,3 (max_user_instances 128->8192)
Post node2 OOM incident (2026-03-14). Previous kubelet config had no
kubeReserved/systemReserved set, allowing pods to starve the kernel.
2026-03-14 21:01:24 +00:00
              memory = "64Mi"
            }

            requests = {
[ci skip] right-size all pod resources based on VPA + live metrics audit
Full cluster resource audit: cross-referenced Goldilocks VPA recommendations,
live kubectl top metrics, and Terraform definitions for 100+ containers.
Critical fixes:
- dashy: CPU throttled at 98% (490m/500m) → 2 CPU limit
- stirling-pdf: CPU throttled at 99.7% (299m/300m) → 2 CPU limit
- traefik auth-proxy/bot-block-proxy: mem limit 32Mi → 128Mi
Added explicit resources to ~40 containers that had none:
- audiobookshelf, changedetection, cyberchef, dawarich, diun, echo,
excalidraw, freshrss, hackmd, isponsorblocktv, linkwarden, n8n,
navidrome, ntfy, owntracks, privatebin, send, shadowsocks, tandoor,
tor-proxy, wealthfolio, networking-toolbox, rybbit, mailserver,
cloudflared, pgadmin, phpmyadmin, crowdsec-web, xray, wireguard,
k8s-portal, tuya-bridge, ollama-ui, whisper, piper, immich-server,
immich-postgresql, osrm-foot
GPU containers: added CPU/mem alongside GPU limits:
- ollama: removed CPU/mem limits (models vary in size), keep GPU only
- frigate: req 500m/2Gi, lim 4/8Gi + GPU
- immich-ml: req 100m/1Gi, lim 2/4Gi + GPU
Right-sized ~25 over-provisioned containers:
- kms-web-page: 500m/512Mi → 50m/64Mi (was using 0m/10Mi)
- onlyoffice: CPU 8 → 2 (VPA upper 45m)
- realestate-crawler-api: CPU 2000m → 250m
- blog/travel-blog/webhook-handler: 500m → 100m
- coturn/health/plotting-book: reduced to match actual usage
Conservative methodology: limits = max(VPA upper * 2, live usage * 2)
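Sketch of the memory arithmetic (the 64Mi rounding step is an assumption
inferred from the values above, not documented tooling):
```python
import math

def right_size_mem_mib(vpa_upper_mib, live_usage_mib, step=64):
    """limits = max(VPA upper * 2, live usage * 2), rounded up to a 64Mi step."""
    raw = max(vpa_upper_mib * 2, live_usage_mib * 2)
    return step * math.ceil(raw / step)

print(right_size_mem_mib(25, 30))  # -> 64
```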
2026-03-01 19:18:50 +00:00
              cpu = "10m"
              memory = "64Mi"
            }
          }

          port {
            container_port = 80
          }

          volume_mount {
            name       = "id-rsa"
            mount_path = "/opt/id_rsa"
            sub_path   = "id_rsa"
          }

          env {
            name = "WEBHOOKSECRET"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "secret"
              }
            }
          }

          env {
            name = "FB_APP_SECRET"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "fb_app_secret"
              }
            }
          }

          env {
            name = "FB_VERIFY_TOKEN"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "fb_verify_token"
              }
            }
          }

          env {
            name = "FB_PAGE_TOKEN"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "fb_page_token"
              }
            }
          }

          env {
            name  = "CONFIG"
            value = "./chatbot/config/viktorwebservices.yaml"
          }

          env {
            name = "GIT_USER"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "git_user"
              }
            }
          }

          env {
            name = "GIT_TOKEN"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "git_token"
              }
            }
          }

          env {
            name  = "SSH_KEY"
            value = "/opt/id_rsa"
          }

          env {
            name  = "WOODPECKER_API_URL"
            value = "https://ci.viktorbarzin.me"
          }

          env {
            name = "WOODPECKER_TOKEN"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "woodpecker_token"
              }
            }
          }

          env {
            name = "WOODPECKER_INFRA_REPO_ID"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "woodpecker_infra_repo_id"
              }
            }
          }

          env {
            name = "AUTHENTIK_WEBHOOK_SECRET"
            value_from {
              secret_key_ref {
                name = "webhook-handler-secrets"
                key  = "authentik_webhook_secret"
              }
            }
          }
        }

        volume {
          name = "id-rsa"
          secret {
            secret_name = "ssh-key"
          }
        }
      }
    }
  }

  lifecycle {
[infra] Establish KYVERNO_LIFECYCLE_V1 drift-suppression convention [ci skip]
## Context
Phase 1 of the state-drift consolidation audit (plan Wave 3) identified that
the entire repo leans on a repeated `lifecycle { ignore_changes = [...dns_config] }`
snippet to suppress Kyverno's admission-webhook dns_config mutation (the ndots=2
override that prevents NxDomain search-domain flooding). 27 occurrences across
19 stacks. Without this suppression, every pod-owning resource shows perpetual
TF plan drift.
The original plan proposed a shared `modules/kubernetes/kyverno_lifecycle/`
module emitting the ignore-paths list as an output that stacks would consume in
their `ignore_changes` blocks. That approach is architecturally impossible:
Terraform's `ignore_changes` meta-argument accepts only static attribute paths
— it rejects module outputs, locals, variables, and any expression (the HCL
spec evaluates `lifecycle` before the regular expression graph). So a DRY
module cannot exist. The canonical pattern IS the repeated snippet.
What the snippet was missing was a *discoverability tag* so that (a) new
resources can be validated for compliance, (b) the existing 27 sites can be
grep'd in a single command, and (c) future maintainers understand the
convention rather than each reinventing it.
## This change
- Introduces `# KYVERNO_LIFECYCLE_V1` as the canonical marker comment.
Attached inline on every `spec[0].template[0].spec[0].dns_config` line
(or `spec[0].job_template[0].spec[0]...` for CronJobs) across all 27
existing suppression sites.
- Documents the convention with rationale and copy-paste snippets in
`AGENTS.md` → new "Kyverno Drift Suppression" section.
- Expands the existing `.claude/CLAUDE.md` Kyverno ndots note to reference
the marker and explain why the module approach is blocked.
- Updates `_template/main.tf.example` so every new stack starts compliant.
## What is NOT in this change
- The `kubernetes_manifest` Kyverno annotation drift (beads `code-seq`)
— that is Phase B with a sibling `# KYVERNO_MANIFEST_V1` marker.
- Behavioral changes — every `ignore_changes` list is byte-identical
save for the inline comment.
- The fallback module the original plan anticipated — skipped because
Terraform rejects expressions in `ignore_changes`.
- `terraform fmt` cleanup on adjacent unrelated blocks in three files
(claude-agent-service, freedify/factory, hermes-agent). Reverted to
keep this commit scoped to the convention rollout.
## Before / after
Before (cannot distinguish accidental-forgotten from intentional-convention):
```hcl
lifecycle {
  ignore_changes = [spec[0].template[0].spec[0].dns_config]
}
```
After (greppable, self-documenting, discoverable by tooling):
```hcl
lifecycle {
  ignore_changes = [spec[0].template[0].spec[0].dns_config] # KYVERNO_LIFECYCLE_V1
}
```
## Test Plan
### Automated
```
$ rg -c 'KYVERNO_LIFECYCLE_V1' stacks/ -g '*.tf' -g '*.tf.example' \
| awk -F: '{s+=$2} END {print s}'
27
$ git diff --stat | grep -E '\.(tf|tf\.example|md)$' | wc -l
21
# All code-file diffs are 1 insertion + 1 deletion per marker site,
# except beads-server (3), ebooks (4), immich (3), uptime-kuma (2).
$ git diff --stat stacks/ | tail -1
20 files changed, 45 insertions(+), 28 deletions(-)
```
### Manual Verification
No apply required — HCL comments only. Zero effect on any stack's plan output.
Future audits: `rg 'KYVERNO_LIFECYCLE_V1' stacks/ | wc -l` must grow as new
pod-owning resources are added.
## Reproduce locally
1. `cd infra && git pull`
2. `rg 'KYVERNO_LIFECYCLE_V1' stacks/` → expect 27 hits in 19 files
3. Grep any new `kubernetes_deployment` for the marker; absence = missing
   suppression. A rough audit sketch follows below.
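A hypothetical Python version of that audit (file-level heuristic only; it
does not attribute markers to individual resources, and the pod-owner list is
an assumption):
```python
#!/usr/bin/env python3
"""Hypothetical KYVERNO_LIFECYCLE_V1 audit: flag .tf files that define
pod-owning resources but never mention the marker."""
import pathlib
import re

POD_OWNER = re.compile(
    r'^resource "kubernetes_(deployment|stateful_set|daemonset|cron_job)"', re.M
)

for tf in sorted(pathlib.Path("stacks").rglob("*.tf")):
    if ".terraform" in tf.parts:
        continue  # skip gitignored module copies
    text = tf.read_text()
    if POD_OWNER.search(text) and "KYVERNO_LIFECYCLE_V1" not in text:
        print(f"missing KYVERNO_LIFECYCLE_V1: {tf}")
```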
Closes: code-28m
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-18 14:15:51 +00:00
    ignore_changes = [spec[0].template[0].spec[0].dns_config] # KYVERNO_LIFECYCLE_V1
  }
}
resource "kubernetes_service" "webhook_handler" {
  metadata {
    name      = "webhook-handler"
    namespace = kubernetes_namespace.webhook-handler.metadata[0].name
    labels = {
      "app" = "webhook-handler"
    }
  }

  spec {
    selector = {
      app = "webhook-handler"
    }
    port {
      port        = "80"
      target_port = "3000"
    }
  }
}
module "ingress" {
  source          = "../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.webhook-handler.metadata[0].name
  name            = "webhook-handler"
  host            = "webhook"
  dns_type        = "non-proxied"
  tls_secret_name = var.tls_secret_name
  extra_annotations = {
    "gethomepage.dev/enabled"      = "true"
    "gethomepage.dev/name"         = "Webhook Handler"
    "gethomepage.dev/description"  = "Webhook relay"
    "gethomepage.dev/icon"         = "webhook.png"
    "gethomepage.dev/group"        = "Automation"
    "gethomepage.dev/pod-selector" = ""
  }
}
resource "kubernetes_manifest" "external_secret" {
  manifest = {
    apiVersion = "external-secrets.io/v1beta1"
    kind       = "ExternalSecret"
    metadata = {
      name      = "webhook-handler-secrets"
      namespace = "webhook-handler"
    }
    spec = {
      refreshInterval = "15m"
      secretStoreRef = {
        name = "vault-kv"
        kind = "ClusterSecretStore"
      }
      target = {
        name = "webhook-handler-secrets"
      }
      dataFrom = [{
        extract = {
          key = "webhook-handler"
        }
      }]
    }
  }

  depends_on = [kubernetes_namespace.webhook-handler]
}