infra/stacks/descheduler/main.tf
Viktor Barzin d352d6e7f8 resource quota review: fix OOM risks, close quota gaps, add HA protections
Phase 1 - OOM fixes:
- dashy: increase memory limit 512Mi→1Gi (was at 99% utilization)
- caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%)
- mysql-operator: add Helm resource values 256Mi/512Mi, create namespace
  with tier label (was at 92% of LimitRange default)
- prowlarr, flaresolverr, annas-archive-stacks: add explicit resources
  (outgrowing 256Mi LimitRange defaults)
- real-estate-crawler celery: add resources 512Mi/3Gi (608Mi actual, no
  explicit resources)

Phase 2 - Close quota gaps:
- nvidia, real-estate-crawler, trading-bot: remove custom-quota=true
  labels so Kyverno generates tier-appropriate quotas
- descheduler: add tier=1-cluster label for proper classification

Phase 3 - Reduce excessive quotas:
- monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64
- woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16
- GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16

Phase 4 - Kubelet protection:
- Add cpu: 200m to systemReserved and kubeReserved in kubelet template

Phase 5 - HA improvements:
- cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable:1)
- grafana: add topology spread + PDB via Helm values
- crowdsec LAPI: add topology spread + PDB via Helm values
- authentik server: add topology spread via Helm values
- authentik worker: add topology spread + PDB via Helm values
2026-03-08 18:17:46 +00:00

92 lines
2 KiB
HCL

# Dedicated namespace for the descheduler. The tier label classifies the
# namespace for quota generation (per the repo's tiered-quota scheme —
# presumably consumed by Kyverno; confirm against the quota policies).
resource "kubernetes_namespace" "descheduler" {
  metadata {
    name = "descheduler"

    labels = {
      tier = local.tiers.cluster
    }
  }
}
# ClusterRole for the descheduler, aligned with the upstream chart's RBAC.
# Fixes over the previous version:
#   - nodes/pods are read from the core ("") API group; Node and Pod objects
#     are not served by metrics.k8s.io, so the old rules granted nothing usable
#     and the descheduler could not actually list pods.
#   - dropped the invalid "delete" verb on metrics.k8s.io pods: the metrics
#     API is read-only, and pod removal happens via the pods/eviction
#     subresource below.
#   - removed the rule that listed "scheduling.k8s.io" as a *resource* under
#     the core group — that matches no API object and duplicated the correct
#     priorityclasses rule.
resource "kubernetes_cluster_role" "descheduler" {
  metadata {
    name = "descheduler-cluster-role"
  }

  # Record eviction events.
  rule {
    api_groups = [""]
    resources  = ["events"]
    verbs      = ["create", "update"]
  }

  # Read cluster topology and workloads (core API group).
  rule {
    api_groups = [""]
    resources  = ["nodes"]
    verbs      = ["get", "watch", "list"]
  }
  rule {
    api_groups = [""]
    resources  = ["namespaces"]
    verbs      = ["get", "list", "watch"]
  }
  rule {
    api_groups = [""]
    resources  = ["pods"]
    verbs      = ["get", "watch", "list"]
  }

  # Evict pods via the eviction subresource (never raw pod delete).
  rule {
    api_groups = [""]
    resources  = ["pods/eviction"]
    verbs      = ["create"]
  }

  # Read node/pod metrics for utilization-based strategies.
  rule {
    api_groups = ["metrics.k8s.io"]
    resources  = ["nodes", "pods"]
    verbs      = ["get", "watch", "list"]
  }

  # Respect priority classes when selecting eviction candidates.
  rule {
    api_groups = ["scheduling.k8s.io"]
    resources  = ["priorityclasses"]
    verbs      = ["get", "list", "watch"]
  }

  # Respect PodDisruptionBudgets during eviction.
  rule {
    api_groups = ["policy"]
    resources  = ["poddisruptionbudgets"]
    verbs      = ["get", "list", "watch"]
  }
}
# ServiceAccount the descheduler pods run as; granted cluster-wide
# permissions through the cluster role binding in this file.
resource "kubernetes_service_account" "descheduler" {
  metadata {
    name      = "descheduler-sa"
    namespace = kubernetes_namespace.descheduler.metadata[0].name
  }
}
# Binds the descheduler ServiceAccount to its ClusterRole.
# The role and subject now use resource attribute references instead of
# repeated string literals: Terraform gets explicit dependency edges (the
# binding is created after the role and SA exist) and a rename of either
# resource propagates here instead of silently breaking the binding.
resource "kubernetes_cluster_role_binding" "descheduler" {
  metadata {
    name = "descheduler-cluster-role-binding"
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = kubernetes_cluster_role.descheduler.metadata[0].name
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.descheduler.metadata[0].name
    namespace = kubernetes_namespace.descheduler.metadata[0].name
  }
}
# Deploys the upstream descheduler chart, configured via the module-local
# values template.
# NOTE(review): the original carried a "rename me" marker on this resource —
# confirm whether a different Terraform address was intended before renaming
# (a rename forces destroy/recreate unless a moved block is added).
# TODO(review): no chart `version` is pinned, so each apply tracks the latest
# chart release; consider pinning for reproducible deploys.
resource "helm_release" "descheduler" {
  name       = "descheduler"
  namespace  = kubernetes_namespace.descheduler.metadata[0].name
  repository = "https://kubernetes-sigs.github.io/descheduler/"
  chart      = "descheduler"

  values = [templatefile("${path.module}/values.yaml", {})]
}