Full cluster resource audit: cross-referenced Goldilocks VPA recommendations, live kubectl top metrics, and Terraform definitions for 100+ containers.

Critical fixes:
- dashy: CPU throttled at 98% (490m/500m) → 2 CPU limit
- stirling-pdf: CPU throttled at 99.7% (299m/300m) → 2 CPU limit
- traefik auth-proxy/bot-block-proxy: mem limit 32Mi → 128Mi

Added explicit resources to ~40 containers that had none:
- audiobookshelf, changedetection, cyberchef, dawarich, diun, echo, excalidraw, freshrss, hackmd, isponsorblocktv, linkwarden, n8n, navidrome, ntfy, owntracks, privatebin, send, shadowsocks, tandoor, tor-proxy, wealthfolio, networking-toolbox, rybbit, mailserver, cloudflared, pgadmin, phpmyadmin, crowdsec-web, xray, wireguard, k8s-portal, tuya-bridge, ollama-ui, whisper, piper, immich-server, immich-postgresql, osrm-foot

GPU containers (CPU/mem sized alongside GPU limits):
- ollama: removed CPU/mem limits (model sizes vary); GPU limit only
- frigate: req 500m/2Gi, lim 4/8Gi + GPU
- immich-ml: req 100m/1Gi, lim 2/4Gi + GPU

Right-sized ~25 over-provisioned containers:
- kms-web-page: 500m/512Mi → 50m/64Mi (was using 0m/10Mi)
- onlyoffice: CPU 8 → 2 (VPA upper bound 45m)
- realestate-crawler-api: CPU 2000m → 250m
- blog/travel-blog/webhook-handler: 500m → 100m
- coturn/health/plotting-book: reduced to match actual usage

Conservative methodology: limits = max(VPA upper * 2, live usage * 2).
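The sizing rule is mechanical enough to express directly. A minimal HCL sketch, where the VPA and kubectl top figures are made-up placeholders rather than values from the audit:

# Illustrative only: inputs in CPU millicores; neither number is from the audit.
locals {
  vpa_upper_cpu_m  = 75 # hypothetical Goldilocks VPA upper bound
  live_usage_cpu_m = 12 # hypothetical reading from kubectl top

  # limits = max(VPA upper * 2, live usage * 2) -> 150 here.
  cpu_limit_m = max(2 * local.vpa_upper_cpu_m, 2 * local.live_usage_cpu_m)
}

Terraform's built-in max() makes the rule a one-liner; the same shape applies to memory.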
125 lines · 2.4 KiB · HCL
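One more illustration before the file itself: the GPU-container pattern from the audit notes, using frigate's figures, would come out roughly as below. The nvidia.com/gpu resource key and a count of 1 are assumptions, not taken from this repository.

# Sketch using frigate's numbers from the commit message; the GPU resource
# key and count are assumed, not confirmed by this repo.
resources {
  requests = {
    cpu    = "500m"
    memory = "2Gi"
  }
  limits = {
    cpu              = "4"
    memory           = "8Gi"
    "nvidia.com/gpu" = "1"
  }
}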
variable "tls_secret_name" { type = string }
|
|
|
|
|
|
resource "kubernetes_namespace" "tor-proxy" {
|
|
metadata {
|
|
name = "tor-proxy"
|
|
labels = {
|
|
"istio-injection" : "disabled"
|
|
tier = local.tiers.aux
|
|
}
|
|
}
|
|
}
|
|
|
|
module "tls_secret" {
|
|
source = "../../modules/kubernetes/setup_tls_secret"
|
|
namespace = "tor-proxy"
|
|
tls_secret_name = var.tls_secret_name
|
|
}
|
|
|
|
# resource "kubernetes_config_map" "tor_config" {
|
|
# metadata {
|
|
# name = "tor-config"
|
|
# namespace = "tor-proxy"
|
|
# annotations = {
|
|
# "reloader.stakater.com/match" = "true"
|
|
# }
|
|
# }
|
|
|
|
# data = {
|
|
# "torrc" = file("${path.module}/.torrc")
|
|
# }
|
|
# }
|
|
|
|
resource "kubernetes_deployment" "tor-proxy" {
|
|
metadata {
|
|
name = "tor-proxy"
|
|
namespace = "tor-proxy"
|
|
labels = {
|
|
app = "tor-proxy"
|
|
tier = local.tiers.aux
|
|
}
|
|
annotations = {
|
|
"reloader.stakater.com/search" = "true"
|
|
}
|
|
}
|
|
spec {
|
|
replicas = 1
|
|
strategy {
|
|
type = "RollingUpdate"
|
|
}
|
|
selector {
|
|
match_labels = {
|
|
app = "tor-proxy"
|
|
}
|
|
}
|
|
template {
|
|
metadata {
|
|
labels = {
|
|
app = "tor-proxy"
|
|
}
|
|
}
|
|
spec {
|
|
container {
|
|
name = "tor-proxy"
|
|
image = "dperson/torproxy:latest"
|
|
port {
|
|
name = "http"
|
|
container_port = 8118
|
|
protocol = "TCP"
|
|
}
|
|
port {
|
|
name = "tor"
|
|
container_port = 9050
|
|
protocol = "TCP"
|
|
}
|
|
resources {
|
|
requests = {
|
|
cpu = "10m"
|
|
memory = "64Mi"
|
|
}
|
|
limits = {
|
|
cpu = "150m"
|
|
memory = "256Mi"
|
|
}
|
|
}
|
|
# volume_mount {
|
|
# name = "tor-config"
|
|
# mount_path = "/etc/tor/torrc"
|
|
# sub_path = "torrc"
|
|
# }
|
|
}
|
|
# volume {
|
|
# name = "tor-config"
|
|
# config_map {
|
|
# name = kubernetes_config_map.tor_config.metadata[0].name
|
|
# }
|
|
# }
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
resource "kubernetes_service" "tor-proxy" {
|
|
metadata {
|
|
name = "tor-proxy"
|
|
namespace = "tor-proxy"
|
|
labels = {
|
|
"app" = "tor-proxy"
|
|
}
|
|
}
|
|
|
|
spec {
|
|
selector = {
|
|
app = "tor-proxy"
|
|
}
|
|
port {
|
|
name = "http"
|
|
port = 8118
|
|
}
|
|
port {
|
|
name = "tor"
|
|
port = 9050
|
|
}
|
|
}
|
|
}
|
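Other workloads can then reach the proxy through the service's cluster DNS name, which follows from the service and namespace names above. A minimal sketch of a consuming container's env in a hypothetical client deployment (curl-style HTTP_PROXY/ALL_PROXY conventions assumed):

# Hypothetical consumer container env, not part of this file.
env {
  name  = "HTTP_PROXY" # plain HTTP proxying via port 8118
  value = "http://tor-proxy.tor-proxy.svc.cluster.local:8118"
}

env {
  name  = "ALL_PROXY" # SOCKS5 straight to Tor on port 9050
  value = "socks5://tor-proxy.tor-proxy.svc.cluster.local:9050"
}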