resource quota review: fix OOM risks, close quota gaps, add HA protections

Phase 1 - OOM fixes:
- dashy: increase memory request 128Mi→512Mi and limit 512Mi→1Gi (was
  at 99% of limit)
- caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%)
- mysql-operator: add Helm resource values 256Mi/512Mi, create namespace
  with tier label (was at 92% of LimitRange default)
- prowlarr, flaresolverr, annas-archive-stacks: add explicit resources
  (outgrowing 256Mi LimitRange defaults; LimitRange sketch below)
- real-estate-crawler celery: add resources 512Mi/3Gi (608Mi actual, no
  explicit resources)
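
For context, the 256Mi defaults above come from per-namespace LimitRanges.
A minimal sketch of that shape (object name and defaultRequest value are
assumptions, not copied from the repo):

    # Sketch only: name and defaultRequest are assumed. A container created
    # without explicit resources inherits 'default' as its limit and
    # 'defaultRequest' as its request, which is what the pods above outgrew.
    apiVersion: v1
    kind: LimitRange
    metadata:
      name: default-limits
    spec:
      limits:
        - type: Container
          default:
            memory: 256Mi
          defaultRequest:
            memory: 128Mi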

Phase 2 - Close quota gaps:
- nvidia, real-estate-crawler, trading-bot: remove custom-quota=true
  labels so Kyverno generates tier-appropriate quotas (policy pattern
  sketched below)
- descheduler: add tier=1-cluster label for proper classification
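
The quota generation itself is a Kyverno generate rule keyed on the
namespace tier label (see generate_resourcequota_by_tier further down).
A rough sketch of the pattern, with rule name, tier value and quota numbers
assumed for illustration rather than taken from the actual policy:

    # Sketch of the generate-by-tier pattern; names and numbers illustrative.
    apiVersion: kyverno.io/v1
    kind: ClusterPolicy
    metadata:
      name: generate-resourcequota-by-tier
    spec:
      rules:
        - name: quota-for-tier
          match:
            any:
              - resources:
                  kinds: [Namespace]
                  selector:
                    matchLabels:
                      tier: 3-edge              # one rule per tier value (assumed)
          exclude:
            any:
              - resources:
                  selector:
                    matchLabels:
                      resource-governance/custom-quota: "true"  # opt-out label removed in this phase
          generate:
            apiVersion: v1
            kind: ResourceQuota
            name: default-quota
            namespace: "{{request.object.metadata.name}}"
            synchronize: true
            data:
              spec:
                hard:
                  requests.cpu: "8"             # illustrative values
                  requests.memory: 8Gi
                  limits.cpu: "16"
                  limits.memory: 32Gi
                  pods: "40"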

Phase 3 - Reduce excessive quotas:
- monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64
- woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16
- GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16

Phase 4 - Kubelet protection:
- Add cpu: 200m to systemReserved and kubeReserved in kubelet template
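
Rough allocatable impact, assuming a 24 GiB / 8 vCPU worker and the
reservation values in the kubelet patch below:

    # allocatable = capacity - systemReserved - kubeReserved - evictionHard(memory)
    # memory: 24Gi - 512Mi - 512Mi - 500Mi ~= 22.5Gi (matches the ~22 GiB
    #         allocatable noted in the VM table)
    # cpu:    8 - 200m - 200m = 7.6 cores (evictionHard has no CPU dimension)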

Phase 5 - HA improvements (generic pattern sketched after this list):
- cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable:1)
- grafana: add topology spread + PDB via Helm values
- crowdsec LAPI: add topology spread + PDB via Helm values
- authentik server: add topology spread via Helm values
- authentik worker: add topology spread + PDB via Helm values
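
Common shape of the change, sketched generically; label selectors and
object names below are placeholders, the per-app values differ:

    # Pod template fragment: soft spread, prefer one replica per node but
    # still schedule when the constraint cannot be satisfied.
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app: my-app                  # placeholder label
    ---
    # Companion PDB: voluntary disruptions (drains, upgrades) evict at most
    # one pod at a time.
    apiVersion: policy/v1
    kind: PodDisruptionBudget
    metadata:
      name: my-app                       # placeholder name
    spec:
      maxUnavailable: 1
      selector:
        matchLabels:
          app: my-app                    # placeholder label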
Viktor Barzin 2026-03-08 18:17:46 +00:00
parent ead33b23dd
commit d352d6e7f8
19 changed files with 154 additions and 18 deletions

View file

@@ -29,11 +29,11 @@
| 102 | devvm | running | 16 | 8GB | vmbr1:vlan10 | 100G | Development VM |
| 103 | home-assistant | running | 8 | 8GB | vmbr0 | 64G | HA Sofia, net0(vlan10) disabled, SSH: vbarzin@192.168.1.8 |
| 105 | pbs | stopped | 16 | 8GB | vmbr1:vlan10 | 32G | Proxmox Backup (unused) |
| 200 | k8s-master | running | 8 | 16GB | vmbr1:vlan20 | 64G | Control plane (10.0.20.100) |
| 201 | k8s-node1 | running | 16 | 24GB | vmbr1:vlan20 | 256G | GPU node, Tesla T4 |
| 202 | k8s-node2 | running | 8 | 16GB | vmbr1:vlan20 | 256G | Worker |
| 203 | k8s-node3 | running | 8 | 16GB | vmbr1:vlan20 | 256G | Worker |
| 204 | k8s-node4 | running | 8 | 16GB | vmbr1:vlan20 | 256G | Worker |
| 200 | k8s-master | running | 8 | 8GB* | vmbr1:vlan20 | 64G | Control plane (10.0.20.100). *Verify via `qm config 200` |
| 201 | k8s-node1 | running | 16 | 16GB* | vmbr1:vlan20 | 256G | GPU node, Tesla T4. *Verify via `qm config 201` |
| 202 | k8s-node2 | running | 8 | 24GB* | vmbr1:vlan20 | 256G | Worker. *Inferred from k8s allocatable (~22 GiB) |
| 203 | k8s-node3 | running | 8 | 24GB* | vmbr1:vlan20 | 256G | Worker. *Inferred from k8s allocatable (~22 GiB) |
| 204 | k8s-node4 | running | 8 | 24GB* | vmbr1:vlan20 | 256G | Worker. *Inferred from k8s allocatable (~22 GiB) |
| 220 | docker-registry | running | 4 | 4GB | vmbr1:vlan20 | 64G | MAC DE:AD:BE:EF:22:22 (10.0.20.10) |
| 300 | Windows10 | running | 16 | 8GB | vmbr0 | 100G | Windows VM |
| 9000 | truenas | running | 16 | 16GB | vmbr1:vlan10 | 32G+7x256G+1T | NFS (10.0.10.15) |

View file

@@ -313,6 +313,16 @@ resource "kubernetes_deployment" "annas-archive-stacks" {
        container {
          image = "zelest/stacks:latest"
          name  = "annas-archive-stacks"
          resources {
            requests = {
              cpu    = "10m"
              memory = "192Mi"
            }
            limits = {
              cpu    = "500m"
              memory = "384Mi"
            }
          }
          port {
            container_port = 7788
          }

View file

@@ -71,11 +71,11 @@ resource "kubernetes_deployment" "dashy" {
          resources {
            requests = {
              cpu    = "50m"
              memory = "128Mi"
              memory = "512Mi"
            }
            limits = {
              cpu    = "500m"
              memory = "512Mi"
              memory = "1Gi"
            }
          }
          port {

View file

@@ -3,6 +3,9 @@
resource "kubernetes_namespace" "descheduler" {
  metadata {
    name = "descheduler"
    labels = {
      tier = local.tiers.cluster
    }
  }
}

View file

@@ -93,8 +93,10 @@ module "k8s-node-template" {
cat <<'KUBELET_PATCH' | sudo tee -a /var/lib/kubelet/config.yaml
systemReserved:
  memory: "512Mi"
  cpu: "200m"
kubeReserved:
  memory: "512Mi"
  cpu: "200m"
evictionHard:
  memory.available: "500Mi"
  nodefs.available: "10%"

View file

@@ -24,6 +24,13 @@ server:
    limits:
      cpu: "2"
      memory: 1Gi
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: server
  ingress:
    enabled: false
    # hosts:
@@ -46,3 +53,13 @@ worker:
    limits:
      cpu: "1"
      memory: 1Gi
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: worker
  pdb:
    enabled: true
    maxUnavailable: 1

View file

@@ -47,6 +47,16 @@ resource "kubernetes_deployment" "cloudflared" {
        }
      }
      spec {
        topology_spread_constraint {
          max_skew           = 1
          topology_key       = "kubernetes.io/hostname"
          when_unsatisfiable = "ScheduleAnyway"
          label_selector {
            match_labels = {
              app = "cloudflared"
            }
          }
        }
        container {
          # image = "wisdomsky/cloudflared-web:latest"
          image = "cloudflare/cloudflared"
@@ -82,6 +92,21 @@ resource "kubernetes_deployment" "cloudflared" {
  }
}

resource "kubernetes_pod_disruption_budget_v1" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = kubernetes_namespace.cloudflared.metadata[0].name
  }
  spec {
    max_unavailable = "1"
    selector {
      match_labels = {
        app = "cloudflared"
      }
    }
  }
}

resource "kubernetes_service" "cloudflared" {
  metadata {
    name = "cloudflared"

View file

@@ -46,6 +46,17 @@ agent:
lapi:
  priorityClassName: "tier-1-cluster"
  replicas: 3
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: crowdsec
          type: lapi
  pdb:
    enabled: true
    maxUnavailable: 1
  extraSecrets:
    dbPassword: "${DB_PASSWORD}"
  storeCAPICredentialsInSecret: true

View file

@@ -56,15 +56,37 @@ module "tls_secret" {
# Operator installed in mysql-operator namespace (toleration for control-plane).
# Init containers are slow (~20 min each) due to mysqlsh plugin loading.
resource "kubernetes_namespace" "mysql_operator" {
  metadata {
    name = "mysql-operator"
    labels = {
      tier = "1-cluster"
    }
  }
}

resource "helm_release" "mysql_operator" {
  namespace        = "mysql-operator"
  create_namespace = true
  namespace        = kubernetes_namespace.mysql_operator.metadata[0].name
  create_namespace = false
  name             = "mysql-operator"
  timeout          = 300
  repository       = "https://mysql.github.io/mysql-operator/"
  chart            = "mysql-operator"
  version          = "2.2.7"
  values = [yamlencode({
    resources = {
      requests = {
        cpu    = "100m"
        memory = "256Mi"
      }
      limits = {
        cpu    = "500m"
        memory = "512Mi"
      }
    }
  })]
}

# The mysql-sidecar ClusterRole created by the Helm chart is missing

View file

@@ -615,8 +615,8 @@ resource "kubernetes_manifest" "generate_resourcequota_by_tier" {
            hard = {
              "requests.cpu"    = "8"
              "requests.memory" = "8Gi"
              "limits.cpu"      = "48"
              "limits.memory"   = "96Gi"
              "limits.cpu"      = "16"
              "limits.memory"   = "32Gi"
              pods              = "40"
            }
          }

View file

@@ -14,6 +14,16 @@ resource "helm_release" "caretta" {
    victoria-metrics-single = {
      enabled = false
    }
    resources = {
      requests = {
        cpu    = "10m"
        memory = "300Mi"
      }
      limits = {
        cpu    = "200m"
        memory = "512Mi"
      }
    }
  })]
}

View file

@@ -9,6 +9,15 @@ resources:
  limits:
    cpu: 500m
    memory: 512Mi
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: kubernetes.io/hostname
    whenUnsatisfiable: ScheduleAnyway
    labelSelector:
      matchLabels:
        app.kubernetes.io/name: grafana
podDisruptionBudget:
  maxUnavailable: 1
persistence:
  enabled: false # using external mysql
  existingClaim: "grafana-pvc"

View file

@@ -211,8 +211,8 @@ resource "kubernetes_resource_quota" "monitoring" {
    hard = {
      "requests.cpu"    = "16"
      "requests.memory" = "16Gi"
      "limits.cpu"      = "120"
      "limits.memory"   = "240Gi"
      "limits.cpu"      = "64"
      "limits.memory"   = "64Gi"
      pods              = "100"
    }
  }

View file

@@ -13,7 +13,6 @@ resource "kubernetes_namespace" "nvidia" {
    labels = {
      "istio-injection" : "disabled"
      tier = var.tier
      "resource-governance/custom-quota" = "true"
    }
  }
}

View file

@@ -18,7 +18,6 @@ resource "kubernetes_namespace" "realestate-crawler" {
    labels = {
      "istio-injection" : "disabled"
      tier = local.tiers.aux
      "resource-governance/custom-quota" = "true"
    }
  }
}
@@ -321,6 +320,16 @@ resource "kubernetes_deployment" "realestate-crawler-celery" {
          image             = "viktorbarzin/realestatecrawler:latest"
          image_pull_policy = "Always"
          command           = ["python", "-m", "celery", "-A", "celery_app", "worker", "--loglevel=info", "--pool=threads"]
          resources {
            requests = {
              cpu    = "50m"
              memory = "512Mi"
            }
            limits = {
              cpu    = "1"
              memory = "3Gi"
            }
          }
          port {
            name           = "metrics"
            container_port = 9090

View file

@@ -31,6 +31,16 @@ resource "kubernetes_deployment" "flaresolverr" {
          image = "ghcr.io/flaresolverr/flaresolverr:latest"
          name  = "flaresolverr"
          resources {
            requests = {
              cpu    = "10m"
              memory = "150Mi"
            }
            limits = {
              cpu    = "500m"
              memory = "384Mi"
            }
          }
          port {
            container_port = 8191
          }

View file

@@ -53,6 +53,16 @@ resource "kubernetes_deployment" "prowlarr" {
          image = "lscr.io/linuxserver/prowlarr:latest"
          name  = "prowlarr"
          resources {
            requests = {
              cpu    = "10m"
              memory = "192Mi"
            }
            limits = {
              cpu    = "500m"
              memory = "384Mi"
            }
          }
          port {
            container_port = 9696
          }

View file

@@ -75,7 +75,6 @@ resource "kubernetes_namespace" "trading-bot" {
    name = "trading-bot"
    labels = {
      tier = local.tiers.edge
      "resource-governance/custom-quota" = "true"
    }
  }
}

View file

@@ -48,8 +48,8 @@ resource "kubernetes_resource_quota" "woodpecker" {
    hard = {
      "requests.cpu"    = "16"
      "requests.memory" = "16Gi"
      "limits.cpu"      = "64"
      "limits.memory"   = "128Gi"
      "limits.cpu"      = "16"
      "limits.memory"   = "32Gi"
      pods              = "60"
    }
  }