infra/stacks/platform/modules/wireguard/main.tf
Viktor Barzin ca648ff9bb
[ci skip] right-size all pod resources based on VPA + live metrics audit
Full cluster resource audit: cross-referenced Goldilocks VPA recommendations,
live kubectl top metrics, and Terraform definitions for 100+ containers.

Critical fixes:
- dashy: CPU throttled at 98% (490m/500m) → 2 CPU limit
- stirling-pdf: CPU throttled at 99.7% (299m/300m) → 2 CPU limit
- traefik auth-proxy/bot-block-proxy: mem limit 32Mi → 128Mi

Added explicit resources to ~40 containers that had none:
- audiobookshelf, changedetection, cyberchef, dawarich, diun, echo,
  excalidraw, freshrss, hackmd, isponsorblocktv, linkwarden, n8n,
  navidrome, ntfy, owntracks, privatebin, send, shadowsocks, tandoor,
  tor-proxy, wealthfolio, networking-toolbox, rybbit, mailserver,
  cloudflared, pgadmin, phpmyadmin, crowdsec-web, xray, wireguard,
  k8s-portal, tuya-bridge, ollama-ui, whisper, piper, immich-server,
  immich-postgresql, osrm-foot

GPU containers: added CPU/mem alongside GPU limits:
- ollama: removed CPU/mem limits (models vary in size), keep GPU only
- frigate: req 500m/2Gi, lim 4/8Gi + GPU
- immich-ml: req 100m/1Gi, lim 2/4Gi + GPU

Right-sized ~25 over-provisioned containers:
- kms-web-page: 500m/512Mi → 50m/64Mi (was using 0m/10Mi)
- onlyoffice: CPU 8 → 2 (VPA upper 45m)
- realestate-crawler-api: CPU 2000m → 250m
- blog/travel-blog/webhook-handler: 500m → 100m
- coturn/health/plotting-book: reduced to match actual usage

Conservative methodology: limits = max(VPA upper * 2, live usage * 2)
2026-03-01 19:18:50 +00:00

253 lines
5.9 KiB
HCL

# Module inputs. wg_0_conf / firewall_sh / wg_0_key carry file *contents*
# (not paths); the caller reads the files and passes the text in.
variable "tls_secret_name" { type = string } # name of the TLS secret created by the tls_secret helper module
variable "tier" { type = string }            # propagated as a namespace/deployment label for cluster-level policy
variable "wg_0_conf" { type = string }       # server-side wg0.conf stanza; client peers are appended from extra/clients.conf
variable "firewall_sh" { type = string }     # contents of setup-firewall.sh shipped via the ConfigMap
variable "wg_0_key" {
  type      = string
  sensitive = true # WireGuard server private key — redact from plan/apply output
}
# Provision the TLS secret into this module's namespace via the shared helper.
module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  tls_secret_name = var.tls_secret_name
  namespace       = kubernetes_namespace.wireguard.metadata[0].name
}
# Dedicated namespace for everything in this module; the tier label is
# supplied by the caller.
resource "kubernetes_namespace" "wireguard" {
  metadata {
    labels = {
      tier = var.tier
    }
    name = "wireguard"
  }
}
# wg0.conf = server stanza (var.wg_0_conf) with the static client peer list
# from extra/clients.conf appended. Reloader watches this map ("match" = true)
# and restarts consumers when it changes.
resource "kubernetes_config_map" "wg_0_conf" {
  metadata {
    namespace = kubernetes_namespace.wireguard.metadata[0].name
    name      = "wg0-conf"
    annotations = {
      "reloader.stakater.com/match" = "true"
    }
    labels = {
      app = "wireguard"
    }
  }
  data = {
    "wg0.conf"          = "${var.wg_0_conf}${file("${path.module}/extra/clients.conf")}"
    "setup-firewall.sh" = var.firewall_sh
  }
}
# Server private key plus a bootstrap config blob for the WireGuard UI.
# Reloader watches this secret ("match" = true) and restarts consumers on change.
resource "kubernetes_secret" "wg_0_key" {
  metadata {
    name      = "wg0-key"
    namespace = kubernetes_namespace.wireguard.metadata[0].name
    annotations = {
      "reloader.stakater.com/match" = "true"
    }
  }
  data = {
    "wg0.key" = var.wg_0_key
    # NOTE: if the private key changes, the hard-coded public key below must
    # be regenerated and updated manually (wg pubkey < wg0.key).
    "wg-ui-config" = format("{\"PrivateKey\": \"%s\",\"PublicKey\": \"%s\",\"Users\": {}}", var.wg_0_key, "3OeDa6Z3Z6vPVxn/WKJujYL7DoDYPPpI5W+2glUYLHU=")
  }
  # "Opaque" is the standard Kubernetes secret type for arbitrary user data
  # (and the provider's default). "generic" is only the kubectl subcommand
  # name, not an API type.
  type = "Opaque"
}
# Single-replica WireGuard server with a Prometheus exporter sidecar.
resource "kubernetes_deployment" "wireguard" {
metadata {
name = "wireguard"
namespace = kubernetes_namespace.wireguard.metadata[0].name
labels = {
app = "wireguard"
tier = var.tier
}
annotations = {
# Reloader scans this deployment's referenced ConfigMaps/Secrets (the ones
# annotated with reloader "match") and restarts the pod when they change.
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
strategy {
rolling_update {
# Bring the replacement pod up before tearing the old one down.
max_surge = "2"
max_unavailable = "0"
}
}
selector {
match_labels = {
app = "wireguard"
}
}
template {
metadata {
labels = {
app = "wireguard"
}
annotations = {
# Scraped on the exporter sidecar's port (9586), not the WireGuard port.
"prometheus.io/scrape" = "true"
"prometheus.io/port" = "9586"
}
}
spec {
# Privileged init container enables IPv4 forwarding for the pod's network
# namespace before the WireGuard container starts.
init_container {
name = "sysctl-setup"
image = "busybox"
command = ["/bin/sh", "-c", "echo 1 > /proc/sys/net/ipv4/ip_forward"]
security_context {
privileged = true
}
}
container {
image = "sclevine/wg:latest"
name = "wireguard"
image_pull_policy = "IfNotPresent"
# Tunnel lifecycle is tied to the container: wg0 is brought up after start
# and torn down before stop.
lifecycle {
post_start {
exec {
command = ["wg-quick", "up", "wg0"]
}
}
pre_stop {
exec {
command = ["wg-quick", "down", "wg0"]
}
}
}
# wg-quick does not stay in the foreground; tail keeps PID 1 alive so the
# container does not exit after post_start.
command = ["tail", "-f", "/dev/null"]
port {
container_port = 51820
protocol = "UDP"
}
# sub_path mounts project single files into /etc/wireguard without
# shadowing the whole directory.
volume_mount {
name = "wg0-key"
mount_path = "/etc/wireguard/wg0.key"
sub_path = "wg0.key"
}
volume_mount {
name = "wg0-conf"
mount_path = "/etc/wireguard/wg0.conf"
sub_path = "wg0.conf"
}
volume_mount {
name = "wg0-conf"
mount_path = "/etc/wireguard/setup-firewall.sh"
sub_path = "setup-firewall.sh"
}
security_context {
# NET_ADMIN for interface/route setup; SYS_MODULE in case the wireguard
# kernel module needs loading on the host.
capabilities {
add = ["NET_ADMIN", "SYS_MODULE"]
}
}
resources {
requests = {
cpu = "10m"
memory = "16Mi"
}
limits = {
cpu = "100m"
memory = "128Mi"
}
}
}
# Sidecar: exports per-peer WireGuard metrics parsed from wg0.conf
# (-a: extract peer names, -v: verbose).
container {
name = "prometheus-exporter"
image = "mindflavor/prometheus-wireguard-exporter"
image_pull_policy = "IfNotPresent"
command = ["prometheus_wireguard_exporter", "-a", "true", "-v", "true", "-n", "/etc/wireguard/wg0.conf"]
volume_mount {
name = "wg0-conf"
mount_path = "/etc/wireguard/wg0.conf"
sub_path = "wg0.conf"
}
security_context {
# Needed to query the wg device in the shared pod network namespace.
capabilities {
add = ["NET_ADMIN"]
}
}
port {
container_port = 9586
protocol = "TCP"
}
resources {
requests = {
cpu = "10m"
memory = "16Mi"
}
limits = {
cpu = "50m"
memory = "64Mi"
}
}
}
volume {
name = "wg0-key"
secret {
secret_name = "wg0-key"
}
}
volume {
name = "wg0-conf"
config_map {
name = "wg0-conf"
}
}
# Lower ndots so short external names resolve without cycling through
# every cluster search domain first.
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
# UDP LoadBalancer exposing WireGuard; the MetalLB annotation lets this
# service share its external IP with other services using the same key.
resource "kubernetes_service" "wireguard" {
  metadata {
    namespace = kubernetes_namespace.wireguard.metadata[0].name
    name      = "wireguard"
    labels = {
      "app" = "wireguard"
    }
    annotations = {
      "metallb.universe.tf/allow-shared-ip" = "shared"
    }
  }
  spec {
    selector = {
      app = "wireguard"
    }
    type                    = "LoadBalancer"
    external_traffic_policy = "Cluster"
    port {
      protocol = "UDP"
      port     = "51820"
    }
  }
}
# ClusterIP service fronting the exporter sidecar: port 9102 forwards to the
# exporter's listen port 9586 on the WireGuard pod.
resource "kubernetes_service" "wireguard_exporter" {
  metadata {
    namespace = kubernetes_namespace.wireguard.metadata[0].name
    name      = "wireguard-exporter"
    labels = {
      "app" = "wireguard-exporter"
    }
  }
  spec {
    port {
      target_port = "9586"
      port        = "9102"
    }
    selector = {
      app = "wireguard"
    }
  }
}