Phase 1 - OOM fixes: - dashy: increase memory limit 512Mi→1Gi (was at 99% utilization) - caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%) - mysql-operator: add Helm resource values 256Mi/512Mi, create namespace with tier label (was at 92% of LimitRange default) - prowlarr, flaresolverr, annas-archive-stacks: add explicit resources (outgrowing 256Mi LimitRange defaults) - real-estate-crawler celery: add resources 512Mi/3Gi (608Mi actual, no explicit resources) Phase 2 - Close quota gaps: - nvidia, real-estate-crawler, trading-bot: remove custom-quota=true labels so Kyverno generates tier-appropriate quotas - descheduler: add tier=1-cluster label for proper classification Phase 3 - Reduce excessive quotas: - monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64 - woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16 - GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16 Phase 4 - Kubelet protection: - Add cpu: 200m to systemReserved and kubeReserved in kubelet template Phase 5 - HA improvements: - cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable:1) - grafana: add topology spread + PDB via Helm values - crowdsec LAPI: add topology spread + PDB via Helm values - authentik server: add topology spread via Helm values - authentik worker: add topology spread + PDB via Helm values
62 lines
1.3 KiB
HCL
62 lines
1.3 KiB
HCL
# Caretta: eBPF-based network-observability agent (deployed by the chart
# as a DaemonSet). The chart's bundled Grafana and VictoriaMetrics are
# disabled because this cluster already runs its own monitoring stack
# in the same namespace.
resource "helm_release" "caretta" {
  name = "caretta"

  # Referencing the managed namespace resource creates an implicit
  # Terraform dependency, so the namespace is guaranteed to exist
  # before install. `create_namespace` was dropped (defaults to false):
  # Helm must not race Terraform for ownership of a namespace Terraform
  # already manages — a Helm-created namespace would also lack the tier
  # label that drives quota/LimitRange generation.
  namespace = kubernetes_namespace.monitoring.metadata[0].name

  repository = "https://helm.groundcover.com/"
  chart      = "caretta"
  version    = "0.0.16"

  values = [yamlencode({
    grafana = {
      enabled = false
    }
    victoria-metrics-single = {
      enabled = false
    }
    # Explicit resources for the agent pods: observed at 85-98% of the
    # previous implicit defaults, so requests/limits are pinned here.
    resources = {
      requests = {
        cpu    = "10m"
        memory = "300Mi"
      }
      limits = {
        cpu    = "200m"
        memory = "512Mi"
      }
    }
  })]
}
# Service (default ClusterIP) exposing Caretta's metrics endpoint on
# port 7117 so a scrape config can discover it via the `app=caretta`
# label.
resource "kubernetes_service" "caretta_metrics" {
  metadata {
    name      = "caretta-metrics"
    namespace = kubernetes_namespace.monitoring.metadata[0].name

    labels = {
      app = "caretta"
    }
  }

  spec {
    # Targets the pods labelled by the Caretta chart.
    selector = {
      app = "caretta"
    }

    port {
      name        = "metrics"
      protocol    = "TCP"
      port        = 7117
      target_port = 7117
    }
  }
}
# Ships the Caretta dashboard JSON as a ConfigMap. The
# `grafana_dashboard = "1"` label presumably makes Grafana's dashboard
# sidecar pick it up automatically — confirm against the Grafana
# deployment's sidecar configuration.
resource "kubernetes_config_map" "caretta_grafana_dashboard" {
  metadata {
    name      = "caretta-grafana-dashboard"
    namespace = kubernetes_namespace.monitoring.metadata[0].name

    labels = {
      grafana_dashboard = "1"
    }
  }

  data = {
    # Dashboard definition is versioned alongside this module.
    "caretta-dashboard.json" = file("${path.module}/dashboards/caretta-dashboard.json")
  }
}