Full cluster resource audit: cross-referenced Goldilocks VPA recommendations, live `kubectl top` metrics, and Terraform definitions for 100+ containers.

Critical fixes:
- dashy: CPU throttled at 98% (490m/500m) → 2 CPU limit
- stirling-pdf: CPU throttled at 99.7% (299m/300m) → 2 CPU limit
- traefik auth-proxy/bot-block-proxy: mem limit 32Mi → 128Mi

Added explicit resources to ~40 containers that had none:
- audiobookshelf, changedetection, cyberchef, dawarich, diun, echo, excalidraw, freshrss, hackmd, isponsorblocktv, linkwarden, n8n, navidrome, ntfy, owntracks, privatebin, send, shadowsocks, tandoor, tor-proxy, wealthfolio, networking-toolbox, rybbit, mailserver, cloudflared, pgadmin, phpmyadmin, crowdsec-web, xray, wireguard, k8s-portal, tuya-bridge, ollama-ui, whisper, piper, immich-server, immich-postgresql, osrm-foot

GPU containers — added CPU/mem alongside GPU limits:
- ollama: removed CPU/mem limits (models vary in size), keep GPU only
- frigate: req 500m/2Gi, lim 4/8Gi + GPU
- immich-ml: req 100m/1Gi, lim 2/4Gi + GPU

Right-sized ~25 over-provisioned containers:
- kms-web-page: 500m/512Mi → 50m/64Mi (was using 0m/10Mi)
- onlyoffice: CPU 8 → 2 (VPA upper bound 45m)
- realestate-crawler-api: CPU 2000m → 250m
- blog/travel-blog/webhook-handler: 500m → 100m
- coturn/health/plotting-book: reduced to match actual usage

Conservative methodology: limits = max(VPA upper * 2, live usage * 2)
166 lines
4.1 KiB
HCL
# Inputs for the HackMD (CodiMD) stack.

variable "hackmd_db_password" {
  type        = string
  description = "Password for the 'codimd' database user."
}

variable "tls_secret_name" {
  type        = string
  description = "Name of the TLS secret used by the ingress."
}

variable "nfs_server" {
  type        = string
  description = "NFS server hosting the uploads volume."
}

variable "mysql_host" {
  type        = string
  description = "Hostname of the external MySQL database."
}
# Dedicated namespace for HackMD; istio sidecar injection is disabled here.
resource "kubernetes_namespace" "hackmd" {
  metadata {
    name = "hackmd"

    labels = {
      "istio-injection" = "disabled"
      tier              = local.tiers.edge
    }
  }
}
# Provisions the shared TLS secret inside the hackmd namespace.
module "tls_secret" {
  source          = "../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.hackmd.metadata[0].name
  tls_secret_name = var.tls_secret_name
}
# HackMD (CodiMD) web application.
# The database is external (MySQL at var.mysql_host) and uploads live on NFS,
# so a single replica with a RollingUpdate strategy is safe.
# NOTE: an in-pod postgres sidecar container used to be defined here
# (postgres:11.6-alpine, user/db "codimd", data on the same NFS volume under
# sub_path "postgres"); it was replaced by the external MySQL database.
resource "kubernetes_deployment" "hackmd" {
  metadata {
    name      = "hackmd"
    namespace = kubernetes_namespace.hackmd.metadata[0].name

    labels = {
      app                             = "hackmd"
      "kubernetes.io/cluster-service" = "true"
      tier                            = local.tiers.edge
    }
  }

  spec {
    replicas = 1

    strategy {
      type = "RollingUpdate" # DB is external so we can roll
    }

    selector {
      match_labels = {
        app = "hackmd"
      }
    }

    template {
      metadata {
        labels = {
          app                             = "hackmd"
          "kubernetes.io/cluster-service" = "true"
        }
      }

      spec {
        container {
          name = "codimd"
          # NOTE(review): untagged image floats to :latest — consider pinning a version.
          image = "hackmdio/hackmd"

          env {
            name = "CMD_DB_URL"
            # Previously pointed at the in-pod postgres sidecar
            # ("postgres://codimd:<pw>@localhost/codimd").
            value = format("%s%s%s", "mysql://codimd:", var.hackmd_db_password, "@${var.mysql_host}/codimd")
          }

          env {
            name  = "CMD_USECDN"
            value = "false"
          }

          volume_mount {
            name       = "data"
            mount_path = "/home/hackmd/app/public/uploads"
            sub_path   = "hackmd"
          }

          port {
            name           = "http"
            container_port = 3000
            protocol       = "TCP"
          }

          resources {
            requests = {
              cpu    = "15m"
              memory = "64Mi"
            }

            limits = {
              cpu    = "250m"
              memory = "512Mi"
            }
          }
        }

        security_context {
          # Grants the pod's group write access to the NFS uploads volume.
          fs_group = "1500"
        }

        volume {
          name = "data"

          nfs {
            path   = "/mnt/main/hackmd"
            server = var.nfs_server
          }

          # NOTE: an iSCSI-backed variant of this volume
          # (iqn.2020-12.lan.viktorbarzin:storage:hackmd, ext4, lun 0) was
          # previously configured here before the switch to NFS.
        }
      }
    }
  }
}
# In-cluster service for HackMD: service port 80 -> container port 3000.
resource "kubernetes_service" "hackmd" {
  metadata {
    name      = "hackmd"
    namespace = kubernetes_namespace.hackmd.metadata[0].name

    # Label key unquoted to match the style used on the deployment's labels.
    labels = {
      app = "hackmd"
    }
  }

  spec {
    selector = {
      app = "hackmd"
    }

    port {
      # Numeric values instead of quoted strings — these attributes are
      # numbers in the provider schema; quoting them relied on implicit
      # type coercion.
      port        = 80
      target_port = 3000
    }
  }
}
# Creates the HTTPS ingress for HackMD via the shared ingress factory module.
module "ingress" {
  source          = "../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.hackmd.metadata[0].name
  name            = "hackmd"
  tls_secret_name = var.tls_secret_name
}