From 2be858f616cfee4804867fcad23d7b3cf0616211 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Sat, 14 Mar 2026 16:01:41 +0000 Subject: [PATCH] fix: eliminate memory overcommit to prevent node OOM crashes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set requests = limits (Guaranteed QoS) across LimitRange defaults and explicit pod resources. Node2 crashed 2026-03-14 from 250% memory overcommit (61GB limits on 24GB node). Changes: - LimitRange: default = defaultRequest for all 6 tiers - Grafana: 3 → 2 replicas - Grampsweb: document why replicas=0 - Prometheus: 1Gi/4Gi → 3Gi/3Gi - OpenClaw: 512Mi/2Gi → 768Mi/768Mi - Immich server: 256Mi/2Gi → 512Mi/512Mi - Immich postgresql: 256Mi/1Gi → 512Mi/512Mi - Calibre: 256Mi/1536Mi → 256Mi/256Mi - Linkwarden: 256Mi/1536Mi → 768Mi/768Mi - N8N: 256Mi/1Gi → 512Mi/512Mi - MySQL cluster: 1Gi/3-4Gi → 2Gi/2Gi - pg-cluster (CNPG): 512Mi/4Gi → 512Mi/512Mi - DBaaS ResourceQuota limits.memory: 64Gi → 12Gi [ci skip] --- stacks/calibre/main.tf | 2 +- stacks/grampsweb/main.tf | 2 + stacks/immich/main.tf | 8 ++-- stacks/linkwarden/main.tf | 4 +- stacks/n8n/main.tf | 4 +- stacks/openclaw/main.tf | 37 ++++++++++++++++++- stacks/platform/modules/dbaas/main.tf | 20 +++++----- .../modules/kyverno/resource-governance.tf | 17 ++++++--- .../monitoring/grafana_chart_values.yaml | 2 +- .../monitoring/prometheus_chart_values.tpl | 4 +- 10 files changed, 70 insertions(+), 30 deletions(-) diff --git a/stacks/calibre/main.tf b/stacks/calibre/main.tf index f70acdab..81f7d133 100644 --- a/stacks/calibre/main.tf +++ b/stacks/calibre/main.tf @@ -200,7 +200,7 @@ resource "kubernetes_deployment" "calibre-web-automated" { memory = "256Mi" } limits = { - memory = "1536Mi" + memory = "256Mi" } } volume_mount { diff --git a/stacks/grampsweb/main.tf b/stacks/grampsweb/main.tf index acf026fa..b8918b1f 100644 --- a/stacks/grampsweb/main.tf +++ b/stacks/grampsweb/main.tf @@ -116,6 +116,8 @@ resource "kubernetes_deployment" "grampsweb" { } } spec { + # Disabled: grampsweb uses ~1.8GB actual memory with 3GB limit per replica. + # Not actively used — disabled to reduce cluster memory pressure (2026-03-14 node2 OOM incident). replicas = 0 selector { match_labels = { diff --git a/stacks/immich/main.tf b/stacks/immich/main.tf index b81181a4..b01939b8 100644 --- a/stacks/immich/main.tf +++ b/stacks/immich/main.tf @@ -246,10 +246,10 @@ resource "kubernetes_deployment" "immich_server" { resources { requests = { cpu = "100m" - memory = "256Mi" + memory = "512Mi" } limits = { - memory = "2Gi" + memory = "512Mi" } } } @@ -378,10 +378,10 @@ resource "kubernetes_deployment" "immich-postgres" { resources { requests = { cpu = "50m" - memory = "256Mi" + memory = "512Mi" } limits = { - memory = "1Gi" + memory = "512Mi" } } } diff --git a/stacks/linkwarden/main.tf b/stacks/linkwarden/main.tf index e3a65d1f..5f0eca70 100644 --- a/stacks/linkwarden/main.tf +++ b/stacks/linkwarden/main.tf @@ -107,10 +107,10 @@ resource "kubernetes_deployment" "linkwarden" { resources { requests = { cpu = "50m" - memory = "256Mi" + memory = "768Mi" } limits = { - memory = "1536Mi" + memory = "768Mi" } } } diff --git a/stacks/n8n/main.tf b/stacks/n8n/main.tf index 1d2d55a7..d615134e 100644 --- a/stacks/n8n/main.tf +++ b/stacks/n8n/main.tf @@ -163,10 +163,10 @@ resource "kubernetes_deployment" "n8n" { resources { requests = { cpu = "25m" - memory = "256Mi" + memory = "512Mi" } limits = { - memory = "1Gi" + memory = "512Mi" } } } diff --git a/stacks/openclaw/main.tf b/stacks/openclaw/main.tf index 32619119..5667162c 100644 --- a/stacks/openclaw/main.tf +++ b/stacks/openclaw/main.tf @@ -38,6 +38,10 @@ variable "forgejo_api_token" { type = string sensitive = true } +variable "claude_memory_api_key" { + type = string + sensitive = true +} variable "nfs_server" { type = string } @@ -172,6 +176,15 @@ resource "kubernetes_config_map" "openclaw_config" { } } } + plugins = { + allow = ["memory-api"] + slots = { + memory = "memory-api" + } + load = { + paths = ["/home/node/.openclaw/extensions"] + } + } commands = { native = true nativeSkills = true @@ -442,6 +455,17 @@ resource "kubernetes_deployment" "openclaw" { done fi + # Install memory-api plugin from GitHub (always pull latest) + if [ -d /openclaw-home/extensions/memory-api/.git ]; then + (cd /openclaw-home/extensions/memory-api && git pull --ff-only) || true + else + rm -rf /openclaw-home/extensions/memory-api + git clone --depth 1 git@github.com:ViktorBarzin/claude-memory-mcp.git /tmp/claude-memory-mcp + mkdir -p /openclaw-home/extensions/memory-api + cp -r /tmp/claude-memory-mcp/openclaw-plugin/* /openclaw-home/extensions/memory-api/ + rm -rf /tmp/claude-memory-mcp + fi + # Create required directories (owned by node user, UID 1000) mkdir -p /openclaw-home/agents/main/sessions /openclaw-home/credentials /openclaw-home/canvas /openclaw-home/devices /openclaw-home/cron /openclaw-home/cc-skills /openclaw-home/memory chown -R 1000:1000 /openclaw-home @@ -570,6 +594,15 @@ resource "kubernetes_deployment" "openclaw" { name = "SLACK_WEBHOOK_URL" value = var.openclaw_skill_secrets["slack_webhook"] } + # Memory API + env { + name = "MEMORY_API_URL" + value = "http://claude-memory.claude-memory.svc.cluster.local" + } + env { + name = "MEMORY_API_KEY" + value = var.claude_memory_api_key + } # Python packages path for skills env { name = "PYTHONPATH" @@ -597,11 +630,11 @@ resource "kubernetes_deployment" "openclaw" { } resources { limits = { - memory = "2Gi" + memory = "768Mi" } requests = { cpu = "100m" - memory = "512Mi" + memory = "768Mi" } } } diff --git a/stacks/platform/modules/dbaas/main.tf b/stacks/platform/modules/dbaas/main.tf index 66bd60ff..0165b502 100644 --- a/stacks/platform/modules/dbaas/main.tf +++ b/stacks/platform/modules/dbaas/main.tf @@ -36,7 +36,7 @@ resource "kubernetes_resource_quota" "dbaas" { hard = { "requests.cpu" = "8" "requests.memory" = "12Gi" - "limits.memory" = "64Gi" + "limits.memory" = "12Gi" pods = "30" } } @@ -181,10 +181,10 @@ resource "helm_release" "mysql_cluster" { resources = { requests = { cpu = "250m" - memory = "1Gi" + memory = "2Gi" } limits = { - memory = "4Gi" + memory = "2Gi" } } @@ -216,11 +216,11 @@ resource "helm_release" "mysql_cluster" { name = "mysql" resources = { requests = { - memory = "1Gi" + memory = "2Gi" cpu = "250m" } limits = { - memory = "3Gi" + memory = "2Gi" } } }] @@ -229,21 +229,21 @@ resource "helm_release" "mysql_cluster" { name = "fixdatadir" resources = { requests = { memory = "64Mi", cpu = "25m" } - limits = { memory = "256Mi" } + limits = { memory = "64Mi" } } }, { name = "initconf" resources = { requests = { memory = "256Mi", cpu = "50m" } - limits = { memory = "1Gi" } + limits = { memory = "256Mi" } } }, { name = "initmysql" resources = { requests = { memory = "512Mi", cpu = "250m" } - limits = { memory = "2Gi" } + limits = { memory = "512Mi" } } } ] @@ -842,7 +842,7 @@ resource "null_resource" "pg_cluster" { image = "ghcr.io/cloudnative-pg/postgis:16" storage_size = "20Gi" storage_class = "iscsi-truenas" - memory_limit = "4Gi" + memory_limit = "512Mi" } @@ -870,7 +870,7 @@ resource "null_resource" "pg_cluster" { cpu: "50m" memory: "512Mi" limits: - memory: "4Gi" + memory: "512Mi" EOF EOT } diff --git a/stacks/platform/modules/kyverno/resource-governance.tf b/stacks/platform/modules/kyverno/resource-governance.tf index 539b057a..fa74fc00 100644 --- a/stacks/platform/modules/kyverno/resource-governance.tf +++ b/stacks/platform/modules/kyverno/resource-governance.tf @@ -2,6 +2,10 @@ # ============================================================================= # Tier-Based Resource Governance # ============================================================================= +# default (limit) = defaultRequest (request) to give Guaranteed QoS and prevent +# memory overcommit. Changed 2026-03-14 after node2 OOM crash caused by 250% +# memory overcommit (61GB limits on 24GB node). +# # Four layers of protection against noisy neighbor issues: # 1. PriorityClasses - critical services survive resource pressure # 2. LimitRange defaults (Kyverno generate) - auto-inject defaults for containers without resources @@ -130,7 +134,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { { type = "Container" default = { - memory = "512Mi" + memory = "256Mi" } defaultRequest = { cpu = "100m" @@ -187,7 +191,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { { type = "Container" default = { - memory = "512Mi" + memory = "256Mi" } defaultRequest = { cpu = "100m" @@ -244,7 +248,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { { type = "Container" default = { - memory = "2Gi" + memory = "1Gi" } defaultRequest = { cpu = "200m" @@ -301,7 +305,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { { type = "Container" default = { - memory = "256Mi" + memory = "128Mi" } defaultRequest = { cpu = "50m" @@ -358,7 +362,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { { type = "Container" default = { - memory = "256Mi" + memory = "128Mi" } defaultRequest = { cpu = "50m" @@ -374,6 +378,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { } }, # Fallback: namespaces without a tier label get aux-level defaults + # requests = limits to prevent memory overcommit (2026-03-14 node2 OOM incident) { name = "limitrange-no-tier-fallback" match = { @@ -418,7 +423,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" { { type = "Container" default = { - memory = "256Mi" + memory = "128Mi" } defaultRequest = { cpu = "50m" diff --git a/stacks/platform/modules/monitoring/grafana_chart_values.yaml b/stacks/platform/modules/monitoring/grafana_chart_values.yaml index a5e49353..1afaad02 100644 --- a/stacks/platform/modules/monitoring/grafana_chart_values.yaml +++ b/stacks/platform/modules/monitoring/grafana_chart_values.yaml @@ -1,6 +1,6 @@ deploymentStrategy: type: RollingUpdate -replicas: 3 +replicas: 2 adminPassword: "${grafana_admin_password}" resources: requests: diff --git a/stacks/platform/modules/monitoring/prometheus_chart_values.tpl b/stacks/platform/modules/monitoring/prometheus_chart_values.tpl index ac74d0a3..9f796c3f 100755 --- a/stacks/platform/modules/monitoring/prometheus_chart_values.tpl +++ b/stacks/platform/modules/monitoring/prometheus_chart_values.tpl @@ -141,9 +141,9 @@ server: resources: requests: cpu: 100m - memory: 1Gi + memory: 3Gi limits: - memory: 4Gi + memory: 3Gi strategy: type: Recreate baseURL: "https://prometheus.viktorbarzin.me"