infra/stacks/dashy/main.tf
Viktor Barzin 407b33abd6
resource quota review: fix OOM risks, close quota gaps, add HA protections
Phase 1 - OOM fixes:
- dashy: increase memory limit 512Mi→1Gi (was at 99% utilization)
- caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%)
- mysql-operator: add Helm resource values 256Mi/512Mi, create namespace
  with tier label (was at 92% of LimitRange default)
- prowlarr, flaresolverr, annas-archive-stacks: add explicit resources
  (outgrowing the 256Mi LimitRange defaults; see the sketch after this list)
- real-estate-crawler celery: add resources 512Mi/3Gi (actual usage was
  608Mi with no explicit resources set)
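
The Phase 1 pattern is the same everywhere: replace an inherited LimitRange
default with explicit per-container resources. The LimitRange itself is not
part of this file; a minimal sketch of the kind of object that supplies the
256Mi default, where the resource name and namespace are assumptions:

resource "kubernetes_limit_range" "container_defaults" {
  metadata {
    name      = "container-defaults" # assumed name
    namespace = "prowlarr"           # one of the affected namespaces, assumed
  }
  spec {
    limit {
      type = "Container"
      # Containers that declare no limits inherit this ceiling; the
      # workloads listed above had outgrown it.
      default = {
        memory = "256Mi"
      }
    }
  }
}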

Phase 2 - Close quota gaps:
- nvidia, real-estate-crawler, trading-bot: remove custom-quota=true
  labels so Kyverno generates tier-appropriate quotas (sketched below)
- descheduler: add tier=1-cluster label for proper classification
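
Dropping custom-quota=true only works because Kyverno backfills a quota from
the namespace tier label. A hedged sketch of such a generate policy: the
policy name and the tier label value are assumptions, while the 32Gi/16 hard
limits reuse the new GPU-tier defaults from Phase 3:

resource "kubernetes_manifest" "gpu_tier_quota_policy" {
  manifest = {
    apiVersion = "kyverno.io/v1"
    kind       = "ClusterPolicy"
    metadata   = { name = "generate-gpu-tier-quota" } # assumed name
    spec = {
      rules = [{
        name = "gpu-tier-quota"
        match = {
          any = [{
            resources = {
              kinds    = ["Namespace"]
              selector = { matchLabels = { tier = "gpu" } } # assumed label value
            }
          }]
        }
        # Namespaces still labelled custom-quota=true keep their own quotas.
        exclude = {
          any = [{
            resources = {
              selector = { matchLabels = { "custom-quota" = "true" } }
            }
          }]
        }
        generate = {
          apiVersion = "v1"
          kind       = "ResourceQuota"
          name       = "tier-quota"
          namespace  = "{{request.object.metadata.name}}"
          data = {
            spec = {
              hard = {
                "limits.memory" = "32Gi"
                "limits.cpu"    = "16"
              }
            }
          }
        }
      }]
    }
  }
}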

Phase 3 - Reduce excessive quotas:
- monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64 (see the quota
  sketch after this list)
- woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16
- GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16
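
Written out as a plain ResourceQuota, the monitoring reduction looks roughly
like this; the quota's object name is an assumption, the values are the new
ones from the list above:

resource "kubernetes_resource_quota" "monitoring" {
  metadata {
    name      = "monitoring-quota" # assumed name
    namespace = "monitoring"
  }
  spec {
    hard = {
      "limits.memory" = "64Gi" # was 240Gi
      "limits.cpu"    = "64"   # was 120
    }
  }
}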

Phase 4 - Kubelet protection:
- Add cpu: 200m to systemReserved and kubeReserved in kubelet template
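
The kubelet template lives outside this file; a sketch of the fragment it
might carry, assuming the template is rendered from Terraform (the local
name and heredoc framing are assumptions, only the cpu: 200m values come
from this change):

locals {
  kubelet_reservations = <<-EOT
    systemReserved:
      cpu: 200m # headroom for OS daemons so pods cannot starve them
    kubeReserved:
      cpu: 200m # headroom for the kubelet and container runtime
  EOT
}

Reserving CPU for the node itself keeps the kubelet responsive even when
pods saturate the machine, which complements the quota work above.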

Phase 5 - HA improvements:
- cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable: 1);
  see the sketch after this list
- grafana: add topology spread + PDB via Helm values
- crowdsec LAPI: add topology spread + PDB via Helm values
- authentik server: add topology spread via Helm values
- authentik worker: add topology spread + PDB via Helm values
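
In kubernetes-provider terms, the cloudflared change maps to a
topology_spread_constraint block in the pod template plus a standalone PDB.
A sketch under those assumptions: resource names and the app label are
guesses, while ScheduleAnyway and maxUnavailable: 1 are from the list above:

# Placed inside the cloudflared pod template spec:
topology_spread_constraint {
  max_skew           = 1
  topology_key       = "kubernetes.io/hostname"
  when_unsatisfiable = "ScheduleAnyway" # prefer spreading, never block scheduling
  label_selector {
    match_labels = { app = "cloudflared" }
  }
}

resource "kubernetes_pod_disruption_budget_v1" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = "cloudflared" # assumed namespace
  }
  spec {
    max_unavailable = "1" # always leave the other replicas running
    selector {
      match_labels = { app = "cloudflared" }
    }
  }
}

The Helm-managed components (grafana, crowdsec, authentik) get the
equivalent settings through chart values instead of raw manifests, as the
list notes.
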
2026-03-08 18:17:46 +00:00


variable "tls_secret_name" {
type = string
sensitive = true
}
module "tls_secret" {
source = "../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.dashy.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_namespace" "dashy" {
metadata {
name = "dashy"
labels = {
"istio-injection" : "disabled"
      tier = local.tiers.aux
    }
  }
}

resource "kubernetes_config_map" "config" {
metadata {
name = "config"
namespace = kubernetes_namespace.dashy.metadata[0].name
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"conf.yml" = file("${path.module}/conf.yml")
}
}
resource "kubernetes_deployment" "dashy" {
metadata {
name = "dashy"
namespace = kubernetes_namespace.dashy.metadata[0].name
labels = {
app = "dashy"
tier = local.tiers.aux
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "dashy"
}
}
template {
metadata {
annotations = {
# "diun.enable" = "true"
}
labels = {
app = "dashy"
}
}
spec {
container {
image = "lissy93/dashy:latest"
name = "dashy"
          resources {
            requests = {
              cpu    = "50m"
              memory = "512Mi"
            }
            limits = {
              cpu    = "500m"
              memory = "1Gi"
            }
          }

          port {
            container_port = 8080
          }

          volume_mount {
            name       = "config"
            mount_path = "/app/user-data/"
          }
        }

        volume {
          name = "config"
          config_map {
            name = "config"
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "dashy" {
metadata {
name = "dashy"
namespace = kubernetes_namespace.dashy.metadata[0].name
labels = {
app = "dashy"
}
}
spec {
selector = {
app = "dashy"
}
port {
name = "http"
port = 80
target_port = 8080
}
}
}
module "ingress" {
source = "../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.dashy.metadata[0].name
name = "dashy"
tls_secret_name = var.tls_secret_name
protected = true # hidden as we use homepage now
}