infra/stacks/platform/modules/monitoring/main.tf
Viktor Barzin d352d6e7f8 resource quota review: fix OOM risks, close quota gaps, add HA protections
Phase 1 - OOM fixes:
- dashy: increase memory limit 512Mi→1Gi (was at 99% utilization)
- caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%)
- mysql-operator: add Helm resource values 256Mi/512Mi, create namespace
  with tier label (was at 92% of LimitRange default)
- prowlarr, flaresolverr, annas-archive-stacks: add explicit resources
  (outgrowing 256Mi LimitRange defaults)
- real-estate-crawler celery: add resources 512Mi/3Gi (608Mi actual, no
  explicit resources)
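
A rough sketch of what these explicit resources look like with the
Terraform kubernetes provider, using the celery figures above (the CPU
request is an assumed placeholder, not part of this review):

  # inside the relevant container {} block of the deployment
  resources {
    requests = {
      memory = "512Mi"
      cpu    = "250m" # assumption, for illustration only
    }
    limits = {
      memory = "3Gi"
    }
  }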

Phase 2 - Close quota gaps:
- nvidia, real-estate-crawler, trading-bot: remove custom-quota=true
  labels so Kyverno generates tier-appropriate quotas (namespace sketch
  after this list)
- descheduler: add tier=1-cluster label for proper classification
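
Namespace sketch for the label change, reusing the governance label key
that appears in this file (resource name and tier value are assumptions):

  resource "kubernetes_namespace" "trading_bot" {
    metadata {
      name = "trading-bot"
      labels = {
        # no "resource-governance/custom-quota" label here, so the
        # Kyverno policy generates the tier-appropriate ResourceQuota
        tier = "2-app" # assumed tier value
      }
    }
  }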

Phase 3 - Reduce excessive quotas:
- monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64
- woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16
- GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16
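
For example, the GPU tier default quota's hard block ends up roughly as
(only the limits values changed in this phase):

  hard = {
    "limits.cpu"    = "16"
    "limits.memory" = "32Gi"
  }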

Phase 4 - Kubelet protection:
- Add cpu: 200m to systemReserved and kubeReserved in kubelet template
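
Assuming the kubelet template renders a KubeletConfiguration, the
relevant fragment is (all other fields omitted):

  apiVersion: kubelet.config.k8s.io/v1beta1
  kind: KubeletConfiguration
  systemReserved:
    cpu: 200m
  kubeReserved:
    cpu: 200m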

Phase 5 - HA improvements:
- cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable:1);
  sketched after this list
- grafana: add topology spread + PDB via Helm values
- crowdsec LAPI: add topology spread + PDB via Helm values
- authentik server: add topology spread via Helm values
- authentik worker: add topology spread + PDB via Helm values
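
Sketch for the cloudflared case with the Terraform kubernetes provider
(the namespace, the app=cloudflared selector and the topology key are
assumptions about how cloudflared is deployed here):

  # PDB: allow at most one cloudflared pod to be voluntarily disrupted
  resource "kubernetes_pod_disruption_budget_v1" "cloudflared" {
    metadata {
      name      = "cloudflared"
      namespace = "cloudflared" # assumed namespace
    }
    spec {
      max_unavailable = "1"
      selector {
        match_labels = { app = "cloudflared" } # assumed label
      }
    }
  }

  # added to the Deployment pod spec: prefer spreading replicas across
  # nodes, but still schedule when the constraint cannot be satisfied
  topology_spread_constraint {
    max_skew           = 1
    topology_key       = "kubernetes.io/hostname" # assumed topology key
    when_unsatisfiable = "ScheduleAnyway"
    label_selector {
      match_labels = { app = "cloudflared" } # assumed label
    }
  }
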
2026-03-08 18:17:46 +00:00


variable "tls_secret_name" {}
variable "alertmanager_account_password" {}
variable "idrac_host" {
default = "192.168.1.4"
}
variable "idrac_username" {
default = "root"
}
variable "idrac_password" {
default = "calvin"
sensitive = true
}
variable "alertmanager_slack_api_url" {}
variable "tiny_tuya_service_secret" {
type = string
sensitive = true
}
variable "haos_api_token" {
type = string
sensitive = true
}
variable "pve_password" {
type = string
sensitive = true
}
variable "grafana_db_password" {
type = string
sensitive = true
}
variable "grafana_admin_password" {
type = string
sensitive = true
}
variable "tier" { type = string }
variable "mysql_host" { type = string }
resource "kubernetes_namespace" "monitoring" {
metadata {
name = "monitoring"
labels = {
"istio-injection" : "disabled"
tier = var.tier
"resource-governance/custom-quota" = "true"
}
}
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.monitoring.metadata[0].name
tls_secret_name = var.tls_secret_name
}
# Terraform gets angry with the 30k values file :/ use Ansible until solved
# resource "helm_release" "ups_prometheus_snmp_exporter" {
#   namespace        = kubernetes_namespace.monitoring.metadata[0].name
#   create_namespace = true
#   name             = "ups_prometheus_exporter"
#   repository       = "https://prometheus-community.github.io/helm-charts"
#   chart            = "prometheus-snmp-exporter"
#   values           = [file("${path.module}/ups_snmp_values.yaml")]
# }
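# Prometheus liveness probe: every 30 minutes, curl the in-cluster Prometheus
# service; if it is unreachable, send a notification via the webhook service.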
resource "kubernetes_cron_job_v1" "monitor_prom" {
metadata {
name = "monitor-prometheus"
}
spec {
concurrency_policy = "Replace"
failed_jobs_history_limit = 5
schedule = "*/30 * * * *"
job_template {
metadata {
}
spec {
template {
metadata {
}
spec {
container {
name = "monitor-prometheus"
image = "alpine"
command = ["/bin/sh", "-c", "apk add --update curl && curl --connect-timeout 2 prometheus-server.monitoring.svc.cluster.local || curl https://webhook.viktorbarzin.me/fb/message-viktor -d 'Prometheus is down!'"]
}
}
}
}
}
}
}
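# status.viktorbarzin.me redirects to the public HetrixTools uptime report
# via the middleware below.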
resource "kubernetes_manifest" "status_redirect_middleware" {
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "status-redirect"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec = {
redirectRegex = {
regex = ".*"
replacement = "https://hetrixtools.com/r/38981b548b5d38b052aca8d01285a3f3/"
permanent = true
}
}
}
}
resource "kubernetes_ingress_v1" "status" {
metadata {
name = "hetrix-redirect-ingress"
namespace = kubernetes_namespace.monitoring.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "monitoring-status-redirect@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["status.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "status.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "not-used"
port {
number = 80 # redirected by middleware
}
}
}
}
}
}
}
}
resource "kubernetes_manifest" "yotovski_redirect_middleware" {
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "yotovski-redirect"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec = {
redirectRegex = {
regex = ".*"
replacement = "https://hetrixtools.com/r/2ba9d7a5e017794db0fd91f0115a8b3b/"
permanent = true
}
}
}
}
resource "kubernetes_ingress_v1" "status_yotovski" {
metadata {
name = "hetrix-yotovski-redirect-ingress"
namespace = kubernetes_namespace.monitoring.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "monitoring-yotovski-redirect@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["yotovski-status.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "yotovski-status.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "not-used" # redirected by middleware
port {
number = 80
}
}
}
}
}
}
}
}
# Custom ResourceQuota for monitoring; larger than the default 1-cluster tier quota
# because monitoring runs 29+ pods (Prometheus, Grafana, Loki, Alloy, exporters, etc.)
resource "kubernetes_resource_quota" "monitoring" {
  metadata {
    name      = "monitoring-quota"
    namespace = kubernetes_namespace.monitoring.metadata[0].name
  }
  spec {
    hard = {
      "requests.cpu"    = "16"
      "requests.memory" = "16Gi"
      "limits.cpu"      = "64"
      "limits.memory"   = "64Gi"
      pods              = "100"
    }
  }
}