variable "tls_secret_name" {}

variable "alertmanager_account_password" {}

variable "idrac_host" {
  default = "192.168.1.4"
}

variable "idrac_username" {
  default = "root"
}

variable "idrac_password" {
  default   = "calvin"
  sensitive = true
}

variable "alertmanager_slack_api_url" {}

variable "tiny_tuya_service_secret" {
  type      = string
  sensitive = true
}

variable "haos_api_token" {
  type      = string
  sensitive = true
}

variable "pve_password" {
  type      = string
  sensitive = true
}

variable "grafana_admin_password" {
  type      = string
  sensitive = true
}

variable "tier" { type = string }

variable "mysql_host" { type = string }
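# A minimal sketch of how these inputs might be supplied, e.g. from a parent
# terragrunt.hcl or a terraform.tfvars. All values below are placeholders,
# not the real hosts or secrets:
#
#   tls_secret_name            = "monitoring-tls"                      # hypothetical secret name
#   alertmanager_slack_api_url = "https://hooks.slack.com/services/..."
#   tier                       = "1-cluster"                           # assumed tier label
#   mysql_host                 = "mysql.example.svc.cluster.local"     # placeholder host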
resource "kubernetes_namespace" "monitoring" {
|
|
|
|
|
metadata {
|
|
|
|
|
name = "monitoring"
|
|
|
|
|
labels = {
|
|
|
|
|
"istio-injection" : "disabled"
|
2026-02-15 18:48:33 +00:00
|
|
|
tier = var.tier
|
|
|
|
|
"resource-governance/custom-quota" = "true"
|
2025-12-29 10:23:42 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-07 23:45:55 +00:00
|
|
|
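# The "resource-governance/custom-quota" label opts this namespace out of the
# Kyverno-generated tier quota; the explicit kubernetes_resource_quota at the
# bottom of this file applies instead.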
module "tls_secret" {
|
2026-02-22 14:38:14 +00:00
|
|
|
source = "../../../../modules/kubernetes/setup_tls_secret"
|
2025-12-29 10:23:42 +00:00
|
|
|
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
2021-02-07 23:45:55 +00:00
|
|
|
tls_secret_name = var.tls_secret_name
|
|
|
|
|
}
|
|
|
|
|
# Terraform gets angry with the 30k-line values file :/ use Ansible until solved.
# resource "helm_release" "ups_prometheus_snmp_exporter" {
#   namespace        = kubernetes_namespace.monitoring.metadata[0].name
#   create_namespace = true
#   name             = "ups-prometheus-exporter" # Helm release names must be DNS-compliant; underscores are rejected
#   repository       = "https://prometheus-community.github.io/helm-charts"
#   chart            = "prometheus-snmp-exporter"
#   values           = [file("${path.module}/ups_snmp_values.yaml")]
# }
resource "kubernetes_cron_job_v1" "monitor_prom" {
|
2021-04-02 23:14:47 +01:00
|
|
|
metadata {
|
|
|
|
|
name = "monitor-prometheus"
|
|
|
|
|
}
|
|
|
|
|
spec {
|
|
|
|
|
concurrency_policy = "Replace"
|
|
|
|
|
failed_jobs_history_limit = 5
|
|
|
|
|
schedule = "*/30 * * * *"
|
|
|
|
|
job_template {
|
|
|
|
|
metadata {
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
spec {
|
|
|
|
|
template {
|
|
|
|
|
metadata {
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
spec {
|
|
|
|
|
container {
|
|
|
|
|
name = "monitor-prometheus"
|
|
|
|
|
image = "alpine"
|
|
|
|
|
command = ["/bin/sh", "-c", "apk add --update curl && curl --connect-timeout 2 prometheus-server.monitoring.svc.cluster.local || curl https://webhook.viktorbarzin.me/fb/message-viktor -d 'Prometheus is down!'"]
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-07 13:25:49 +00:00
|
|
|
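# Every 30 minutes the job above curls Prometheus in-cluster and, on failure,
# hits a webhook that messages Viktor. To trigger a run by hand (a sketch;
# note the CronJob lands in the provider's default namespace since no
# namespace is set in its metadata):
#   kubectl create job --from=cronjob/monitor-prometheus monitor-prometheus-manual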
resource "kubernetes_manifest" "status_redirect_middleware" {
|
|
|
|
|
manifest = {
|
|
|
|
|
apiVersion = "traefik.io/v1alpha1"
|
|
|
|
|
kind = "Middleware"
|
|
|
|
|
metadata = {
|
|
|
|
|
name = "status-redirect"
|
|
|
|
|
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
|
|
|
|
}
|
|
|
|
|
spec = {
|
|
|
|
|
redirectRegex = {
|
|
|
|
|
regex = ".*"
|
|
|
|
|
replacement = "https://hetrixtools.com/r/38981b548b5d38b052aca8d01285a3f3/"
|
|
|
|
|
permanent = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-06 00:07:48 +00:00
|
|
|
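# Traefik's Kubernetes ingress provider addresses CRD middlewares as
# "<namespace>-<name>@kubernetescrd", hence the
# "monitoring-status-redirect@kubernetescrd" annotation on the ingress below.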
resource "kubernetes_ingress_v1" "status" {
|
2021-02-18 22:26:36 +00:00
|
|
|
metadata {
|
|
|
|
|
name = "hetrix-redirect-ingress"
|
2025-12-29 10:23:42 +00:00
|
|
|
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
2021-02-18 22:26:36 +00:00
|
|
|
annotations = {
|
2026-02-07 13:25:49 +00:00
|
|
|
"traefik.ingress.kubernetes.io/router.middlewares" = "monitoring-status-redirect@kubernetescrd"
|
|
|
|
|
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
|
2021-02-18 22:26:36 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spec {
|
2026-02-07 13:25:49 +00:00
|
|
|
ingress_class_name = "traefik"
|
2021-02-18 22:26:36 +00:00
|
|
|
tls {
|
|
|
|
|
hosts = ["status.viktorbarzin.me"]
|
|
|
|
|
secret_name = var.tls_secret_name
|
|
|
|
|
}
|
|
|
|
|
rule {
|
|
|
|
|
host = "status.viktorbarzin.me"
|
|
|
|
|
http {
|
|
|
|
|
path {
|
|
|
|
|
path = "/"
|
|
|
|
|
backend {
|
2022-01-06 00:07:48 +00:00
|
|
|
service {
|
|
|
|
|
name = "not-used"
|
|
|
|
|
port {
|
2026-02-07 13:25:49 +00:00
|
|
|
number = 80 # redirected by middleware
|
2022-01-06 00:07:48 +00:00
|
|
|
}
|
|
|
|
|
}
|
2021-02-18 22:26:36 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-07 13:25:49 +00:00
|
|
|
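# Kubernetes requires every ingress path to declare a backend service, even
# though the redirect middleware replies before traffic ever reaches one;
# hence the "not-used" placeholder above (and in the twin ingress below).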
resource "kubernetes_manifest" "yotovski_redirect_middleware" {
|
|
|
|
|
manifest = {
|
|
|
|
|
apiVersion = "traefik.io/v1alpha1"
|
|
|
|
|
kind = "Middleware"
|
|
|
|
|
metadata = {
|
|
|
|
|
name = "yotovski-redirect"
|
|
|
|
|
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
|
|
|
|
}
|
|
|
|
|
spec = {
|
|
|
|
|
redirectRegex = {
|
|
|
|
|
regex = ".*"
|
|
|
|
|
replacement = "https://hetrixtools.com/r/2ba9d7a5e017794db0fd91f0115a8b3b/"
|
|
|
|
|
permanent = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-06 00:07:48 +00:00
|
|
|
resource "kubernetes_ingress_v1" "status_yotovski" {
|
2021-02-18 22:26:36 +00:00
|
|
|
metadata {
|
|
|
|
|
name = "hetrix-yotovski-redirect-ingress"
|
2025-12-29 10:23:42 +00:00
|
|
|
namespace = kubernetes_namespace.monitoring.metadata[0].name
|
2021-02-18 22:26:36 +00:00
|
|
|
annotations = {
|
2026-02-07 13:25:49 +00:00
|
|
|
"traefik.ingress.kubernetes.io/router.middlewares" = "monitoring-yotovski-redirect@kubernetescrd"
|
|
|
|
|
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
|
2021-02-18 22:26:36 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spec {
|
2026-02-07 13:25:49 +00:00
|
|
|
ingress_class_name = "traefik"
|
2021-02-18 22:26:36 +00:00
|
|
|
tls {
|
|
|
|
|
hosts = ["yotovski-status.viktorbarzin.me"]
|
|
|
|
|
secret_name = var.tls_secret_name
|
|
|
|
|
}
|
|
|
|
|
rule {
|
|
|
|
|
host = "yotovski-status.viktorbarzin.me"
|
|
|
|
|
http {
|
|
|
|
|
path {
|
|
|
|
|
path = "/"
|
|
|
|
|
backend {
|
2022-01-06 00:07:48 +00:00
|
|
|
service {
|
2026-02-07 13:25:49 +00:00
|
|
|
name = "not-used" # redirected by middleware
|
2022-01-06 00:07:48 +00:00
|
|
|
port {
|
|
|
|
|
number = 80
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-02-18 22:26:36 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-04-05 15:06:24 +01:00
|
|
|
|
2026-02-15 18:48:33 +00:00
|
|
|
# Custom ResourceQuota for monitoring — larger than the default 1-cluster tier quota
# because monitoring runs 29+ pods (Prometheus, Grafana, Loki, Alloy, exporters, etc.)
resource "kubernetes_resource_quota" "monitoring" {
  metadata {
    name      = "monitoring-quota"
    namespace = kubernetes_namespace.monitoring.metadata[0].name
  }
  spec {
    hard = {
      "requests.cpu"    = "16"
      "requests.memory" = "16Gi"
      "limits.memory"   = "64Gi"
      pods              = "100"
    }
  }
}
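# To check how much of this quota is currently consumed:
#   kubectl describe resourcequota monitoring-quota -n monitoring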