fix: eliminate memory overcommit to prevent node OOM crashes

Set requests = limits (Guaranteed QoS) across LimitRange defaults and
explicit pod resources. Node2 crashed 2026-03-14 from 250% memory
overcommit (61GB limits on 24GB node).

Changes:
- LimitRange: default = defaultRequest for all 6 tiers
- Grafana: 3 → 2 replicas
- Grampsweb: document why replicas=0
- Prometheus: 1Gi/4Gi → 3Gi/3Gi
- OpenClaw: 512Mi/2Gi → 768Mi/768Mi
- Immich server: 256Mi/2Gi → 512Mi/512Mi
- Immich postgresql: 256Mi/1Gi → 512Mi/512Mi
- Calibre: 256Mi/1536Mi → 256Mi/256Mi
- Linkwarden: 256Mi/1536Mi → 768Mi/768Mi
- N8N: 256Mi/1Gi → 512Mi/512Mi
- MySQL cluster: 1Gi/3-4Gi → 2Gi/2Gi
- pg-cluster (CNPG): 512Mi/4Gi → 512Mi/512Mi
- DBaaS ResourceQuota limits.memory: 64Gi → 12Gi

[ci skip]
This commit is contained in:
Viktor Barzin 2026-03-14 16:01:41 +00:00
parent 27fa8ea18f
commit 2be858f616
10 changed files with 70 additions and 30 deletions

View file

@ -200,7 +200,7 @@ resource "kubernetes_deployment" "calibre-web-automated" {
memory = "256Mi"
}
limits = {
memory = "1536Mi"
memory = "256Mi"
}
}
volume_mount {

View file

@ -116,6 +116,8 @@ resource "kubernetes_deployment" "grampsweb" {
}
}
spec {
# Disabled: grampsweb uses ~1.8GB actual memory with 3GB limit per replica.
# Not actively used; disabled to reduce cluster memory pressure (2026-03-14 node2 OOM incident).
replicas = 0
selector {
match_labels = {

View file

@ -246,10 +246,10 @@ resource "kubernetes_deployment" "immich_server" {
resources {
requests = {
cpu = "100m"
memory = "256Mi"
memory = "512Mi"
}
limits = {
memory = "2Gi"
memory = "512Mi"
}
}
}
@ -378,10 +378,10 @@ resource "kubernetes_deployment" "immich-postgres" {
resources {
requests = {
cpu = "50m"
memory = "256Mi"
memory = "512Mi"
}
limits = {
memory = "1Gi"
memory = "512Mi"
}
}
}

View file

@ -107,10 +107,10 @@ resource "kubernetes_deployment" "linkwarden" {
resources {
requests = {
cpu = "50m"
memory = "256Mi"
memory = "768Mi"
}
limits = {
memory = "1536Mi"
memory = "768Mi"
}
}
}

View file

@ -163,10 +163,10 @@ resource "kubernetes_deployment" "n8n" {
resources {
requests = {
cpu = "25m"
memory = "256Mi"
memory = "512Mi"
}
limits = {
memory = "1Gi"
memory = "512Mi"
}
}
}

View file

@ -38,6 +38,10 @@ variable "forgejo_api_token" {
type = string
sensitive = true
}
variable "claude_memory_api_key" {
type = string
sensitive = true
}
variable "nfs_server" { type = string }
@ -172,6 +176,15 @@ resource "kubernetes_config_map" "openclaw_config" {
}
}
}
plugins = {
allow = ["memory-api"]
slots = {
memory = "memory-api"
}
load = {
paths = ["/home/node/.openclaw/extensions"]
}
}
commands = {
native = true
nativeSkills = true
@ -442,6 +455,17 @@ resource "kubernetes_deployment" "openclaw" {
done
fi
# Install memory-api plugin from GitHub (always pull latest)
if [ -d /openclaw-home/extensions/memory-api/.git ]; then
(cd /openclaw-home/extensions/memory-api && git pull --ff-only) || true
else
rm -rf /openclaw-home/extensions/memory-api
git clone --depth 1 git@github.com:ViktorBarzin/claude-memory-mcp.git /tmp/claude-memory-mcp
mkdir -p /openclaw-home/extensions/memory-api
cp -r /tmp/claude-memory-mcp/openclaw-plugin/* /openclaw-home/extensions/memory-api/
rm -rf /tmp/claude-memory-mcp
fi
# Create required directories (owned by node user, UID 1000)
mkdir -p /openclaw-home/agents/main/sessions /openclaw-home/credentials /openclaw-home/canvas /openclaw-home/devices /openclaw-home/cron /openclaw-home/cc-skills /openclaw-home/memory
chown -R 1000:1000 /openclaw-home
@ -570,6 +594,15 @@ resource "kubernetes_deployment" "openclaw" {
name = "SLACK_WEBHOOK_URL"
value = var.openclaw_skill_secrets["slack_webhook"]
}
# Memory API
env {
name = "MEMORY_API_URL"
value = "http://claude-memory.claude-memory.svc.cluster.local"
}
env {
name = "MEMORY_API_KEY"
value = var.claude_memory_api_key
}
# Python packages path for skills
env {
name = "PYTHONPATH"
@ -597,11 +630,11 @@ resource "kubernetes_deployment" "openclaw" {
}
resources {
limits = {
memory = "2Gi"
memory = "768Mi"
}
requests = {
cpu = "100m"
memory = "512Mi"
memory = "768Mi"
}
}
}

View file

@ -36,7 +36,7 @@ resource "kubernetes_resource_quota" "dbaas" {
hard = {
"requests.cpu" = "8"
"requests.memory" = "12Gi"
"limits.memory" = "64Gi"
"limits.memory" = "12Gi"
pods = "30"
}
}
@ -181,10 +181,10 @@ resource "helm_release" "mysql_cluster" {
resources = {
requests = {
cpu = "250m"
memory = "1Gi"
memory = "2Gi"
}
limits = {
memory = "4Gi"
memory = "2Gi"
}
}
@ -216,11 +216,11 @@ resource "helm_release" "mysql_cluster" {
name = "mysql"
resources = {
requests = {
memory = "1Gi"
memory = "2Gi"
cpu = "250m"
}
limits = {
memory = "3Gi"
memory = "2Gi"
}
}
}]
@ -229,21 +229,21 @@ resource "helm_release" "mysql_cluster" {
name = "fixdatadir"
resources = {
requests = { memory = "64Mi", cpu = "25m" }
limits = { memory = "256Mi" }
limits = { memory = "64Mi" }
}
},
{
name = "initconf"
resources = {
requests = { memory = "256Mi", cpu = "50m" }
limits = { memory = "1Gi" }
limits = { memory = "256Mi" }
}
},
{
name = "initmysql"
resources = {
requests = { memory = "512Mi", cpu = "250m" }
limits = { memory = "2Gi" }
limits = { memory = "512Mi" }
}
}
]
@ -842,7 +842,7 @@ resource "null_resource" "pg_cluster" {
image = "ghcr.io/cloudnative-pg/postgis:16"
storage_size = "20Gi"
storage_class = "iscsi-truenas"
memory_limit = "4Gi"
memory_limit = "512Mi"
}
@ -870,7 +870,7 @@ resource "null_resource" "pg_cluster" {
cpu: "50m"
memory: "512Mi"
limits:
memory: "4Gi"
memory: "512Mi"
EOF
EOT
}

View file

@ -2,6 +2,10 @@
# =============================================================================
# Tier-Based Resource Governance
# =============================================================================
# default (limit) = defaultRequest (request) for memory to prevent memory
# overcommit. Changed 2026-03-14 after node2 OOM crash caused by 250%
# memory overcommit (61GB limits on 24GB node).
# NOTE: defaultRequest also sets CPU but default sets no CPU limit, so pods
# injected by these defaults get Burstable (not Guaranteed) QoS; memory
# request = limit still eliminates memory overcommit, which is the goal here.
#
# Four layers of protection against noisy neighbor issues:
# 1. PriorityClasses - critical services survive resource pressure
# 2. LimitRange defaults (Kyverno generate) - auto-inject defaults for containers without resources
@ -130,7 +134,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
{
type = "Container"
default = {
memory = "512Mi"
memory = "256Mi"
}
defaultRequest = {
cpu = "100m"
@ -187,7 +191,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
{
type = "Container"
default = {
memory = "512Mi"
memory = "256Mi"
}
defaultRequest = {
cpu = "100m"
@ -244,7 +248,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
{
type = "Container"
default = {
memory = "2Gi"
memory = "1Gi"
}
defaultRequest = {
cpu = "200m"
@ -301,7 +305,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
{
type = "Container"
default = {
memory = "256Mi"
memory = "128Mi"
}
defaultRequest = {
cpu = "50m"
@ -358,7 +362,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
{
type = "Container"
default = {
memory = "256Mi"
memory = "128Mi"
}
defaultRequest = {
cpu = "50m"
@ -374,6 +378,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
}
},
# Fallback: namespaces without a tier label get aux-level defaults
# requests = limits to prevent memory overcommit (2026-03-14 node2 OOM incident)
{
name = "limitrange-no-tier-fallback"
match = {
@ -418,7 +423,7 @@ resource "kubernetes_manifest" "generate_limitrange_by_tier" {
{
type = "Container"
default = {
memory = "256Mi"
memory = "128Mi"
}
defaultRequest = {
cpu = "50m"

View file

@ -1,6 +1,6 @@
deploymentStrategy:
type: RollingUpdate
replicas: 3
replicas: 2
adminPassword: "${grafana_admin_password}"
resources:
requests:

View file

@ -141,9 +141,9 @@ server:
resources:
requests:
cpu: 100m
memory: 1Gi
memory: 3Gi
limits:
memory: 4Gi
memory: 3Gi
strategy:
type: Recreate
baseURL: "https://prometheus.viktorbarzin.me"