# Contents for cloudflare tunnel
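# Cloudflare Tunnel connector: a dedicated namespace, the setup_tls_secret module,
# a 3-replica cloudflared Deployment, a PodDisruptionBudget, and a ClusterIP Service.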

variable "tls_secret_name" {}

variable "cloudflare_tunnel_token" {}
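
# How these inputs are supplied is not shown in this file; a hypothetical
# terraform.tfvars (placeholder values only) could look like:
#
#   tls_secret_name         = "example-tls"
#   cloudflare_tunnel_token = "eyJh..." # token from the Cloudflare Zero Trust dashboard
#   tier                    = "web"     # any tier label value used by the cluster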
resource "kubernetes_namespace" "cloudflared" {
|
|
|
|
|
metadata {
|
|
|
|
|
name = "cloudflared"
|
2026-02-21 23:38:05 +00:00
|
|
|
labels = {
|
|
|
|
|
tier = var.tier
|
|
|
|
|
}
|
2024-01-06 21:33:05 +00:00
|
|
|
}
|
|
|
|
|
}
|
2026-01-10 16:28:12 +00:00
|
|
|
variable "tier" { type = string }
|
2024-01-06 21:33:05 +00:00
|
|
|
|
|
|
|
|
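
# Assumed behaviour: the module below provisions the TLS secret named
# var.tls_secret_name inside the cloudflared namespace (module source not shown here).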
module "tls_secret" {
|
2026-02-22 14:38:14 +00:00
|
|
|
source = "../../../../modules/kubernetes/setup_tls_secret"
|
2025-12-29 10:23:42 +00:00
|
|
|
namespace = kubernetes_namespace.cloudflared.metadata[0].name
|
2024-01-06 21:33:05 +00:00
|
|
|
tls_secret_name = var.tls_secret_name
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
resource "kubernetes_deployment" "cloudflared" {
|
|
|
|
|
metadata {
|
|
|
|
|
name = "cloudflared"
|
2025-12-29 10:23:42 +00:00
|
|
|
namespace = kubernetes_namespace.cloudflared.metadata[0].name
|
2024-01-06 21:33:05 +00:00
|
|
|
labels = {
|
2026-01-10 16:28:12 +00:00
|
|
|
app = "cloudflared"
|
|
|
|
|
tier = var.tier
|
2024-01-06 21:33:05 +00:00
|
|
|
}
|
|
|
|
|
annotations = {
|
|
|
|
|
"reloader.stakater.com/search" = "true"
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
spec {
|
2025-04-06 18:13:44 +00:00
|
|
|
replicas = 3
|
2024-01-06 21:33:05 +00:00
|
|
|
strategy {
|
|
|
|
|
type = "RollingUpdate"
|
|
|
|
|
}
|
|
|
|
|
selector {
|
|
|
|
|
match_labels = {
|
|
|
|
|
app = "cloudflared"
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
template {
|
|
|
|
|
metadata {
|
|
|
|
|
labels = {
|
|
|
|
|
app = "cloudflared"
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
spec {
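        # Best-effort spread of the replicas across nodes: with
        # when_unsatisfiable = "ScheduleAnyway" the constraint nudges placement
        # but never blocks scheduling.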
        topology_spread_constraint {
          max_skew           = 1
          topology_key       = "kubernetes.io/hostname"
          when_unsatisfiable = "ScheduleAnyway"

          label_selector {
            match_labels = {
              app = "cloudflared"
            }
          }
        }

        container {
          # image = "wisdomsky/cloudflared-web:latest"
          image   = "cloudflare/cloudflared"
          name    = "cloudflared"
          command = ["cloudflared", "tunnel", "run"]

          env {
            name  = "TUNNEL_TOKEN"
            value = var.cloudflare_tunnel_token
          }
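
          # Note: 14333 is the web-UI port of the commented-out wisdomsky image
          # above; the plain cloudflare/cloudflared image does not listen here by
          # default, so this port (and the Service below that targets it) appears
          # to be carried over from that earlier setup.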
          port {
            container_port = 14333
          }
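
          # 128Mi is both the request and the limit; no CPU limit is set, so the
          # container can burst above its 15m CPU request.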
          resources {
            requests = {
              cpu    = "15m"
              memory = "128Mi"
            }

            limits = {
              memory = "128Mi"
            }
          }
        }
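
        # ndots = 2 lets the external hostnames cloudflared dials resolve as
        # absolute names instead of first cycling through the cluster search domains.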
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}
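
# For reference only: a hypothetical variant of the env block above that reads the
# tunnel token from a pre-existing Kubernetes Secret (names below are placeholders)
# instead of embedding var.cloudflare_tunnel_token directly in the pod spec:
#
#   env {
#     name = "TUNNEL_TOKEN"
#     value_from {
#       secret_key_ref {
#         name = "cloudflared-tunnel-token" # hypothetical Secret name
#         key  = "token"
#       }
#     }
#   }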
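
# With replicas = 3 and max_unavailable = 1, voluntary disruptions (node drains,
# upgrades) always leave at least two cloudflared pods running.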
resource "kubernetes_pod_disruption_budget_v1" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = kubernetes_namespace.cloudflared.metadata[0].name
  }

  spec {
    max_unavailable = "1"

    selector {
      match_labels = {
        app = "cloudflared"
      }
    }
  }
}

resource "kubernetes_service" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = kubernetes_namespace.cloudflared.metadata[0].name

    labels = {
      app = "cloudflared"
    }
  }

  spec {
    selector = {
      app = "cloudflared"
    }

    port {
      name        = "http"
      port        = 80
      target_port = 14333
      protocol    = "TCP"
    }
  }
}