## Context
Deploying new services required manually adding hostnames to
cloudflare_proxied_names/cloudflare_non_proxied_names in config.tfvars —
a separate file from the service stack. This was frequently forgotten,
leaving services unreachable externally.
## This change:
- Add `dns_type` parameter to `ingress_factory` and `reverse_proxy/factory`
modules. Setting `dns_type = "proxied"` or `"non-proxied"` auto-creates
the Cloudflare DNS record (CNAME to tunnel or A/AAAA to public IP).
- Simplify cloudflared tunnel from 100 per-hostname rules to wildcard
`*.viktorbarzin.me → Traefik`. Traefik still handles host-based routing.
- Add global Cloudflare provider via terragrunt.hcl (separate
cloudflare_provider.tf with Vault-sourced API key).
- Migrate 118 hostnames from centralized config.tfvars to per-service
dns_type. 17 hostnames remain centrally managed (Helm ingresses,
special cases).
- Update docs, AGENTS.md, CLAUDE.md, dns.md runbook.
```
BEFORE                               AFTER
config.tfvars (manual list)          stacks/<svc>/main.tf
        |                              module "ingress" {
        v                                dns_type = "proxied"
stacks/cloudflared/                    }
  for_each = list                             |
  cloudflare_record                           v
  tunnel per-hostname                  auto-creates
                                       cloudflare_record + annotation
```
## What is NOT in this change:
- Uptime Kuma monitor migration (still reads from config.tfvars)
- 17 remaining centrally-managed hostnames (Helm, special cases)
- Removal of allow_overwrite (keep until migration confirmed stable)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
File: 128 lines, 2.5 KiB, HCL
variable "tls_secret_name" {
  description = "Name of the Kubernetes TLS secret created in the dashy namespace and referenced by the ingress."
  type        = string
  sensitive   = true
}
# Provision the TLS certificate secret inside the dashy namespace.
module "tls_secret" {
  source = "../../modules/kubernetes/setup_tls_secret"

  namespace       = kubernetes_namespace.dashy.metadata[0].name
  tls_secret_name = var.tls_secret_name
}
# Dedicated namespace for the dashy dashboard workload.
resource "kubernetes_namespace" "dashy" {
  metadata {
    name = "dashy"

    labels = {
      # Keep the Istio sidecar out of this namespace.
      "istio-injection" = "disabled"
      tier              = local.tiers.aux
    }
  }
}
# Dashy configuration, loaded from the conf.yml file next to this stack.
resource "kubernetes_config_map" "config" {
  metadata {
    name      = "config"
    namespace = kubernetes_namespace.dashy.metadata[0].name

    annotations = {
      # Marks this config map so Reloader rolls pods when its content changes.
      "reloader.stakater.com/match" = "true"
    }
  }

  data = {
    "conf.yml" = file("${path.module}/conf.yml")
  }
}
# Dashy dashboard deployment.
# NOTE: scaled to zero since the 2026-03-14 OOM incident to relieve cluster
# memory pressure; set replicas back to 1 to re-enable.
resource "kubernetes_deployment" "dashy" {
  metadata {
    name      = "dashy"
    namespace = kubernetes_namespace.dashy.metadata[0].name

    labels = {
      app  = "dashy"
      tier = local.tiers.aux
    }

    annotations = {
      # Lets Reloader find the config map this deployment mounts and restart
      # the pods when it changes.
      "reloader.stakater.com/search" = "true"
    }
  }

  spec {
    # Disabled: reduce cluster memory pressure (2026-03-14 OOM incident)
    replicas = 0

    selector {
      match_labels = {
        app = "dashy"
      }
    }

    template {
      metadata {
        annotations = {
          # "diun.enable" = "true"
        }

        labels = {
          app = "dashy"
        }
      }

      spec {
        container {
          # NOTE(review): ':latest' is not reproducible — consider pinning a
          # release tag before re-enabling this deployment.
          image = "lissy93/dashy:latest"
          name  = "dashy"

          resources {
            requests = {
              cpu    = "250m"
              memory = "1Gi"
            }
            limits = {
              memory = "1Gi"
            }
          }

          port {
            container_port = 8080
          }

          volume_mount {
            name       = "config"
            mount_path = "/app/user-data/"
          }
        }

        volume {
          name = "config"

          config_map {
            # Reference the resource (not the bare string "config") so
            # Terraform orders creation of the config map before the
            # deployment, consistent with the namespace references above.
            name = kubernetes_config_map.config.metadata[0].name
          }
        }
      }
    }
  }
}
# Service mapping port 80 to the dashy container's port 8080.
resource "kubernetes_service" "dashy" {
  metadata {
    name      = "dashy"
    namespace = kubernetes_namespace.dashy.metadata[0].name

    labels = {
      app = "dashy"
    }
  }

  spec {
    selector = {
      app = "dashy"
    }

    port {
      name        = "http"
      port        = 80
      target_port = 8080
    }
  }
}
# External ingress for dashy. dns_type = "proxied" makes the ingress_factory
# module auto-create the proxied Cloudflare DNS record for this hostname.
module "ingress" {
  source = "../../modules/kubernetes/ingress_factory"

  name            = "dashy"
  namespace       = kubernetes_namespace.dashy.metadata[0].name
  dns_type        = "proxied"
  tls_secret_name = var.tls_secret_name
  protected       = true # hidden as we use homepage now
}