From d352d6e7f8f5bb88c3f668b5d7befea38887e3bb Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Sun, 8 Mar 2026 18:17:46 +0000 Subject: [PATCH] resource quota review: fix OOM risks, close quota gaps, add HA protections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 1 - OOM fixes: - dashy: increase memory limit 512Mi→1Gi (was at 99% utilization) - caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%) - mysql-operator: add Helm resource values 256Mi/512Mi, create namespace with tier label (was at 92% of LimitRange default) - prowlarr, flaresolverr, annas-archive-stacks: add explicit resources (outgrowing 256Mi LimitRange defaults) - real-estate-crawler celery: add resources 512Mi/3Gi (608Mi actual, no explicit resources) Phase 2 - Close quota gaps: - nvidia, real-estate-crawler, trading-bot: remove custom-quota=true labels so Kyverno generates tier-appropriate quotas - descheduler: add tier=1-cluster label for proper classification Phase 3 - Reduce excessive quotas: - monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64 - woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16 - GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16 Phase 4 - Kubelet protection: - Add cpu: 200m to systemReserved and kubeReserved in kubelet template Phase 5 - HA improvements: - cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable:1) - grafana: add topology spread + PDB via Helm values - crowdsec LAPI: add topology spread + PDB via Helm values - authentik server: add topology spread via Helm values - authentik worker: add topology spread + PDB via Helm values --- .claude/reference/proxmox-inventory.md | 10 +++---- stacks/calibre/main.tf | 10 +++++++ stacks/dashy/main.tf | 4 +-- stacks/descheduler/main.tf | 3 +++ stacks/infra/main.tf | 2 ++ stacks/platform/modules/authentik/values.yaml | 17 ++++++++++++ stacks/platform/modules/cloudflared/main.tf | 25 ++++++++++++++++++ stacks/platform/modules/crowdsec/values.yaml | 11 ++++++++ stacks/platform/modules/dbaas/main.tf | 26 +++++++++++++++++-- .../modules/kyverno/resource-governance.tf | 4 +-- stacks/platform/modules/monitoring/caretta.tf | 10 +++++++ .../monitoring/grafana_chart_values.yaml | 9 +++++++ stacks/platform/modules/monitoring/main.tf | 4 +-- stacks/platform/modules/nvidia/main.tf | 1 - stacks/real-estate-crawler/main.tf | 11 +++++++- stacks/servarr/flaresolverr/main.tf | 10 +++++++ stacks/servarr/prowlarr/main.tf | 10 +++++++ stacks/trading-bot/main.tf | 1 - stacks/woodpecker/main.tf | 4 +-- 19 files changed, 154 insertions(+), 18 deletions(-) diff --git a/.claude/reference/proxmox-inventory.md b/.claude/reference/proxmox-inventory.md index d097a5fc..46caee0c 100644 --- a/.claude/reference/proxmox-inventory.md +++ b/.claude/reference/proxmox-inventory.md @@ -29,11 +29,11 @@ | 102 | devvm | running | 16 | 8GB | vmbr1:vlan10 | 100G | Development VM | | 103 | home-assistant | running | 8 | 8GB | vmbr0 | 64G | HA Sofia, net0(vlan10) disabled, SSH: vbarzin@192.168.1.8 | | 105 | pbs | stopped | 16 | 8GB | vmbr1:vlan10 | 32G | Proxmox Backup (unused) | -| 200 | k8s-master | running | 8 | 16GB | vmbr1:vlan20 | 64G | Control plane (10.0.20.100) | -| 201 | k8s-node1 | running | 16 | 24GB | vmbr1:vlan20 | 256G | GPU node, Tesla T4 | -| 202 | k8s-node2 | running | 8 | 16GB | vmbr1:vlan20 | 256G | Worker | -| 203 | k8s-node3 | running | 8 | 16GB | vmbr1:vlan20 | 256G | Worker | -| 204 | k8s-node4 | running | 8 | 16GB | vmbr1:vlan20 | 256G | Worker | +| 200 | k8s-master | 
running | 8 | 8GB* | vmbr1:vlan20 | 64G | Control plane (10.0.20.100). *Verify via `qm config 200` | +| 201 | k8s-node1 | running | 16 | 16GB* | vmbr1:vlan20 | 256G | GPU node, Tesla T4. *Verify via `qm config 201` | +| 202 | k8s-node2 | running | 8 | 24GB* | vmbr1:vlan20 | 256G | Worker. *Inferred from k8s allocatable (~22 GiB) | +| 203 | k8s-node3 | running | 8 | 24GB* | vmbr1:vlan20 | 256G | Worker. *Inferred from k8s allocatable (~22 GiB) | +| 204 | k8s-node4 | running | 8 | 24GB* | vmbr1:vlan20 | 256G | Worker. *Inferred from k8s allocatable (~22 GiB) | | 220 | docker-registry | running | 4 | 4GB | vmbr1:vlan20 | 64G | MAC DE:AD:BE:EF:22:22 (10.0.20.10) | | 300 | Windows10 | running | 16 | 8GB | vmbr0 | 100G | Windows VM | | 9000 | truenas | running | 16 | 16GB | vmbr1:vlan10 | 32G+7x256G+1T | NFS (10.0.10.15) | diff --git a/stacks/calibre/main.tf b/stacks/calibre/main.tf index 1424fc03..5cd1635c 100644 --- a/stacks/calibre/main.tf +++ b/stacks/calibre/main.tf @@ -313,6 +313,16 @@ resource "kubernetes_deployment" "annas-archive-stacks" { container { image = "zelest/stacks:latest" name = "annas-archive-stacks" + resources { + requests = { + cpu = "10m" + memory = "192Mi" + } + limits = { + cpu = "500m" + memory = "384Mi" + } + } port { container_port = 7788 } diff --git a/stacks/dashy/main.tf b/stacks/dashy/main.tf index 650b67fa..00649a83 100644 --- a/stacks/dashy/main.tf +++ b/stacks/dashy/main.tf @@ -71,11 +71,11 @@ resource "kubernetes_deployment" "dashy" { resources { requests = { cpu = "50m" - memory = "128Mi" + memory = "512Mi" } limits = { cpu = "500m" - memory = "512Mi" + memory = "1Gi" } } port { diff --git a/stacks/descheduler/main.tf b/stacks/descheduler/main.tf index 0f62eea2..4a0e7f09 100644 --- a/stacks/descheduler/main.tf +++ b/stacks/descheduler/main.tf @@ -3,6 +3,9 @@ resource "kubernetes_namespace" "descheduler" { metadata { name = "descheduler" + labels = { + tier = local.tiers.cluster + } } } diff --git a/stacks/infra/main.tf b/stacks/infra/main.tf index 8015c9aa..aca15b6a 100644 --- a/stacks/infra/main.tf +++ b/stacks/infra/main.tf @@ -93,8 +93,10 @@ module "k8s-node-template" { cat <<'KUBELET_PATCH' | sudo tee -a /var/lib/kubelet/config.yaml systemReserved: memory: "512Mi" + cpu: "200m" kubeReserved: memory: "512Mi" + cpu: "200m" evictionHard: memory.available: "500Mi" nodefs.available: "10%" diff --git a/stacks/platform/modules/authentik/values.yaml b/stacks/platform/modules/authentik/values.yaml index 5fe833e0..d07bf946 100644 --- a/stacks/platform/modules/authentik/values.yaml +++ b/stacks/platform/modules/authentik/values.yaml @@ -24,6 +24,13 @@ server: limits: cpu: "2" memory: 1Gi + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/component: server ingress: enabled: false # hosts: @@ -46,3 +53,13 @@ worker: limits: cpu: "1" memory: 1Gi + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/component: worker + pdb: + enabled: true + maxUnavailable: 1 diff --git a/stacks/platform/modules/cloudflared/main.tf b/stacks/platform/modules/cloudflared/main.tf index 3b9bc72a..f10ee9e6 100644 --- a/stacks/platform/modules/cloudflared/main.tf +++ b/stacks/platform/modules/cloudflared/main.tf @@ -47,6 +47,16 @@ resource "kubernetes_deployment" "cloudflared" { } } spec { + topology_spread_constraint { + max_skew = 1 + topology_key = 
"kubernetes.io/hostname" + when_unsatisfiable = "ScheduleAnyway" + label_selector { + match_labels = { + app = "cloudflared" + } + } + } container { # image = "wisdomsky/cloudflared-web:latest" image = "cloudflare/cloudflared" @@ -82,6 +92,21 @@ resource "kubernetes_deployment" "cloudflared" { } } +resource "kubernetes_pod_disruption_budget_v1" "cloudflared" { + metadata { + name = "cloudflared" + namespace = kubernetes_namespace.cloudflared.metadata[0].name + } + spec { + max_unavailable = "1" + selector { + match_labels = { + app = "cloudflared" + } + } + } +} + resource "kubernetes_service" "cloudflared" { metadata { name = "cloudflared" diff --git a/stacks/platform/modules/crowdsec/values.yaml b/stacks/platform/modules/crowdsec/values.yaml index 1ea93235..b7b016d9 100644 --- a/stacks/platform/modules/crowdsec/values.yaml +++ b/stacks/platform/modules/crowdsec/values.yaml @@ -46,6 +46,17 @@ agent: lapi: priorityClassName: "tier-1-cluster" replicas: 3 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: crowdsec + type: lapi + pdb: + enabled: true + maxUnavailable: 1 extraSecrets: dbPassword: "${DB_PASSWORD}" storeCAPICredentialsInSecret: true diff --git a/stacks/platform/modules/dbaas/main.tf b/stacks/platform/modules/dbaas/main.tf index c223b30e..9a149e16 100644 --- a/stacks/platform/modules/dbaas/main.tf +++ b/stacks/platform/modules/dbaas/main.tf @@ -56,15 +56,37 @@ module "tls_secret" { # Operator installed in mysql-operator namespace (toleration for control-plane). # Init containers are slow (~20 min each) due to mysqlsh plugin loading. +resource "kubernetes_namespace" "mysql_operator" { + metadata { + name = "mysql-operator" + labels = { + tier = "1-cluster" + } + } +} + resource "helm_release" "mysql_operator" { - namespace = "mysql-operator" - create_namespace = true + namespace = kubernetes_namespace.mysql_operator.metadata[0].name + create_namespace = false name = "mysql-operator" timeout = 300 repository = "https://mysql.github.io/mysql-operator/" chart = "mysql-operator" version = "2.2.7" + + values = [yamlencode({ + resources = { + requests = { + cpu = "100m" + memory = "256Mi" + } + limits = { + cpu = "500m" + memory = "512Mi" + } + } + })] } # The mysql-sidecar ClusterRole created by the Helm chart is missing diff --git a/stacks/platform/modules/kyverno/resource-governance.tf b/stacks/platform/modules/kyverno/resource-governance.tf index d671ba5c..e019fdb8 100644 --- a/stacks/platform/modules/kyverno/resource-governance.tf +++ b/stacks/platform/modules/kyverno/resource-governance.tf @@ -615,8 +615,8 @@ resource "kubernetes_manifest" "generate_resourcequota_by_tier" { hard = { "requests.cpu" = "8" "requests.memory" = "8Gi" - "limits.cpu" = "48" - "limits.memory" = "96Gi" + "limits.cpu" = "16" + "limits.memory" = "32Gi" pods = "40" } } diff --git a/stacks/platform/modules/monitoring/caretta.tf b/stacks/platform/modules/monitoring/caretta.tf index d939e649..5f76ec17 100644 --- a/stacks/platform/modules/monitoring/caretta.tf +++ b/stacks/platform/modules/monitoring/caretta.tf @@ -14,6 +14,16 @@ resource "helm_release" "caretta" { victoria-metrics-single = { enabled = false } + resources = { + requests = { + cpu = "10m" + memory = "300Mi" + } + limits = { + cpu = "200m" + memory = "512Mi" + } + } })] } diff --git a/stacks/platform/modules/monitoring/grafana_chart_values.yaml b/stacks/platform/modules/monitoring/grafana_chart_values.yaml index 
51bd6786..57b81053 100644 --- a/stacks/platform/modules/monitoring/grafana_chart_values.yaml +++ b/stacks/platform/modules/monitoring/grafana_chart_values.yaml @@ -9,6 +9,15 @@ resources: limits: cpu: 500m memory: 512Mi +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: grafana +podDisruptionBudget: + maxUnavailable: 1 persistence: enabled: false # using external mysql existingClaim: "grafana-pvc" diff --git a/stacks/platform/modules/monitoring/main.tf b/stacks/platform/modules/monitoring/main.tf index 082523df..1c9ffe2f 100644 --- a/stacks/platform/modules/monitoring/main.tf +++ b/stacks/platform/modules/monitoring/main.tf @@ -211,8 +211,8 @@ resource "kubernetes_resource_quota" "monitoring" { hard = { "requests.cpu" = "16" "requests.memory" = "16Gi" - "limits.cpu" = "120" - "limits.memory" = "240Gi" + "limits.cpu" = "64" + "limits.memory" = "64Gi" pods = "100" } } diff --git a/stacks/platform/modules/nvidia/main.tf b/stacks/platform/modules/nvidia/main.tf index c45b19d8..49151a3d 100644 --- a/stacks/platform/modules/nvidia/main.tf +++ b/stacks/platform/modules/nvidia/main.tf @@ -13,7 +13,6 @@ resource "kubernetes_namespace" "nvidia" { labels = { "istio-injection" : "disabled" tier = var.tier - "resource-governance/custom-quota" = "true" } } } diff --git a/stacks/real-estate-crawler/main.tf b/stacks/real-estate-crawler/main.tf index 05b7dc4e..6d91da16 100644 --- a/stacks/real-estate-crawler/main.tf +++ b/stacks/real-estate-crawler/main.tf @@ -18,7 +18,6 @@ resource "kubernetes_namespace" "realestate-crawler" { labels = { "istio-injection" : "disabled" tier = local.tiers.aux - "resource-governance/custom-quota" = "true" } } } @@ -321,6 +320,16 @@ resource "kubernetes_deployment" "realestate-crawler-celery" { image = "viktorbarzin/realestatecrawler:latest" image_pull_policy = "Always" command = ["python", "-m", "celery", "-A", "celery_app", "worker", "--loglevel=info", "--pool=threads"] + resources { + requests = { + cpu = "50m" + memory = "512Mi" + } + limits = { + cpu = "1" + memory = "3Gi" + } + } port { name = "metrics" container_port = 9090 diff --git a/stacks/servarr/flaresolverr/main.tf b/stacks/servarr/flaresolverr/main.tf index c1d6db15..1bd4828a 100644 --- a/stacks/servarr/flaresolverr/main.tf +++ b/stacks/servarr/flaresolverr/main.tf @@ -31,6 +31,16 @@ resource "kubernetes_deployment" "flaresolverr" { image = "ghcr.io/flaresolverr/flaresolverr:latest" name = "flaresolverr" + resources { + requests = { + cpu = "10m" + memory = "150Mi" + } + limits = { + cpu = "500m" + memory = "384Mi" + } + } port { container_port = 8191 } diff --git a/stacks/servarr/prowlarr/main.tf b/stacks/servarr/prowlarr/main.tf index c8148f37..aa064366 100644 --- a/stacks/servarr/prowlarr/main.tf +++ b/stacks/servarr/prowlarr/main.tf @@ -53,6 +53,16 @@ resource "kubernetes_deployment" "prowlarr" { image = "lscr.io/linuxserver/prowlarr:latest" name = "prowlarr" + resources { + requests = { + cpu = "10m" + memory = "192Mi" + } + limits = { + cpu = "500m" + memory = "384Mi" + } + } port { container_port = 9696 } diff --git a/stacks/trading-bot/main.tf b/stacks/trading-bot/main.tf index fa513b32..af8cce28 100644 --- a/stacks/trading-bot/main.tf +++ b/stacks/trading-bot/main.tf @@ -75,7 +75,6 @@ resource "kubernetes_namespace" "trading-bot" { name = "trading-bot" labels = { tier = local.tiers.edge - "resource-governance/custom-quota" = "true" } } } diff --git a/stacks/woodpecker/main.tf 
b/stacks/woodpecker/main.tf index f083480c..24db41b4 100644 --- a/stacks/woodpecker/main.tf +++ b/stacks/woodpecker/main.tf @@ -48,8 +48,8 @@ resource "kubernetes_resource_quota" "woodpecker" { hard = { "requests.cpu" = "16" "requests.memory" = "16Gi" - "limits.cpu" = "64" - "limits.memory" = "128Gi" + "limits.cpu" = "16" + "limits.memory" = "32Gi" pods = "60" } }
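Reviewer note on Phase 1: these workloads were previously sized by the namespace LimitRange defaults rather than explicit container resources. A minimal sketch of the kind of default object the commit message refers to (the real one lives in resource-governance.tf; name, namespace and the request value here are assumptions, only the 256Mi default limit is taken from the message):

apiVersion: v1
kind: LimitRange
metadata:
  name: default-limits        # illustrative; the actual name comes from the governance config
  namespace: mysql-operator
spec:
  limits:
    - type: Container
      default:                # applied as the container limit when none is declared
        memory: 256Mi
      defaultRequest:         # assumed value, not taken from this patch
        memory: 128Mi

Declaring resources explicitly, as Phase 1 does, bypasses these defaults entirely; the 256Mi ceiling is the value mysql-operator was sitting at 92% of.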
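On Phases 2-3: with resource-governance/custom-quota removed, the Kyverno generate rule should stamp a tier-default ResourceQuota into those namespaces. A sketch of the object it would generate for a GPU-tier namespace after the Phase 3 reduction (metadata is illustrative; the real name and ownership come from generate_resourcequota_by_tier, and this assumes the nvidia namespace resolves to the GPU tier via var.tier):

apiVersion: v1
kind: ResourceQuota
metadata:
  name: gpu-tier-quota        # illustrative name
  namespace: nvidia
spec:
  hard:
    requests.cpu: "8"
    requests.memory: 8Gi
    limits.cpu: "16"          # was 48 before Phase 3
    limits.memory: 32Gi       # was 96Gi before Phase 3
    pods: "40"

After apply, `kubectl get resourcequota -A` is a quick way to confirm the generated quotas have replaced the old custom ones.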
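On Phase 4: the reservations also change what the kubelet reports as allocatable, since allocatable = capacity - kubeReserved - systemReserved - evictionHard. A sketch of the appended config block with the arithmetic for an 8-core / 24 GiB worker (capacity figures come from the inventory table, which marks them as inferred):

systemReserved:
  memory: "512Mi"
  cpu: "200m"
kubeReserved:
  memory: "512Mi"
  cpu: "200m"
evictionHard:
  memory.available: "500Mi"
# memory allocatable ~= 24Gi - 512Mi - 512Mi - 500Mi ~= 22.5Gi, in line with the ~22 GiB noted in the inventory
# cpu allocatable     = 8 - 200m - 200m = 7.6 cores

Only the CPU reservation is new in this patch; the memory values were already in the template, so scheduler-visible memory should not move.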
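On Phase 5: the cloudflared PDB is declared directly in Terraform, while grafana, crowdsec LAPI and the authentik worker rely on their charts to render one from values. A sketch of what the grafana chart is expected to render from the values above, assuming it templates podDisruptionBudget more or less verbatim (name, namespace and selector labels here are assumptions based on the chart's conventions):

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: grafana               # illustrative; the chart derives this from the release name
  namespace: monitoring       # assuming the monitoring module installs grafana there
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: grafana

Pairing maxUnavailable: 1 with whenUnsatisfiable: ScheduleAnyway keeps node drains unblocked: the spread constraint is a preference rather than a hard scheduling requirement, and voluntary disruptions still proceed one pod at a time. `kubectl get pdb -A` after rollout confirms all four PDBs exist.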