migrate consuming stacks to ESO + remove k8s-dashboard static token

Phase 9: ExternalSecret migration across 26 stacks:

Fully migrated (vault data source removed, ESO delivers secrets):
- speedtest, shadowsocks, wealthfolio, plotting-book, f1-stream, tandoor
- n8n, dawarich, diun, netbox, onlyoffice, tuya-bridge
- hackmd (ESO template for DB URL), health (ESO template for DB URL)
- trading-bot (ESO template for DATABASE_URL + 7 secret env vars)
- forgejo (removed unused vault data source)

Partially migrated (vault kept for plan-time, ESO added for runtime):
- immich, linkwarden, nextcloud, paperless-ngx (jsondecode for homepage)
- claude-memory, rybbit, url, webhook_handler (plan-time in locals/jobs)
- woodpecker, openclaw, resume (plan-time in helm values/jobs/modules)

17 stacks unchanged (all plan-time: homepage annotations, configmaps,
module inputs) — vault data source works with OIDC auth.

Phase 17a: Remove k8s-dashboard static admin token secret.
Users now get tokens via: vault write kubernetes/creds/dashboard-admin
This commit is contained in:
Viktor Barzin 2026-03-15 19:05:04 +00:00
parent cfc30b62e8
commit 1acf8cc4e8
41 changed files with 1278 additions and 265 deletions

View file

@ -52,6 +52,16 @@ resource "kubernetes_priority_class" "tier_2_gpu" {
description = "GPU workloads: Immich, Ollama, Frigate"
}
# Dedicated priority class for GPU-bound pods. Its value (1200000) places it
# above every user-facing tier so a pod asking for nvidia.com/gpu can preempt
# non-GPU workloads on the GPU node; Kyverno injects it automatically.
resource "kubernetes_priority_class" "gpu_workload" {
  metadata {
    name = "gpu-workload"
  }

  description       = "GPU-pinned workloads. Higher than all user tiers. Auto-injected by Kyverno on pods requesting nvidia.com/gpu."
  value             = 1200000
  global_default    = false
  preemption_policy = "PreemptLowerPriority"
}
resource "kubernetes_priority_class" "tier_3_edge" {
metadata {
name = "tier-3-edge"
@ -858,3 +868,81 @@ resource "kubernetes_manifest" "mutate_ndots" {
}
}
# -----------------------------------------------------------------------------
# Layer 5: GPU Workload Priority Override (Kyverno Mutate)
# -----------------------------------------------------------------------------
# Overrides the tier-based priorityClassName with gpu-workload for pods that
# actually request nvidia.com/gpu resources. This ensures GPU pods can preempt
# non-GPU pods on the GPU node, regardless of namespace tier.
# Runs after Layer 4 (tier injection), so it overrides the tier-based priority.
# Kyverno ClusterPolicy that retargets GPU pods onto the gpu-workload priority
# class. On Pod CREATE (outside the namespaces in local.excluded_namespaces),
# if any container has a request OR a limit for nvidia.com/gpu, the policy
# strips the already-resolved priority fields and sets
# priorityClassName=gpu-workload, overriding whatever Layer 4 injected.
resource "kubernetes_manifest" "mutate_gpu_priority" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "inject-gpu-workload-priority"
annotations = {
"policies.kyverno.io/title" = "Inject GPU Workload Priority"
"policies.kyverno.io/description" = "Overrides priorityClassName to gpu-workload for pods requesting nvidia.com/gpu resources. Runs after tier-based injection."
}
}
spec = {
rules = [
{
name = "gpu-priority-override"
# Match bare Pod objects at CREATE time only; updates are left alone.
match = {
any = [
{
resources = {
kinds = ["Pod"]
operations = ["CREATE"]
}
}
]
}
# Namespaces excluded from all priority mutation (system namespaces,
# presumably — defined elsewhere in this file as local.excluded_namespaces).
exclude = {
any = [
{
resources = {
namespaces = local.excluded_namespaces
}
}
]
}
# Fire when ANY container carries a nvidia.com/gpu request or limit.
# containers[] flattens across the container list; `|| ''` coerces a
# missing field to empty string so NotEquals "" means "present".
preconditions = {
any = [
{
key = "{{ request.object.spec.containers[].resources.requests.\"nvidia.com/gpu\" || '' }}"
operator = "NotEquals"
value = ""
},
{
key = "{{ request.object.spec.containers[].resources.limits.\"nvidia.com/gpu\" || '' }}"
operator = "NotEquals"
value = ""
}
]
}
# Drop the numeric priority and preemptionPolicy that admission already
# resolved from the tier class, then point the pod at gpu-workload.
# NOTE(review): per RFC 6902, a JSON Patch "remove" fails if the target
# path is absent — this relies on /spec/priority and
# /spec/preemptionPolicy always being set before this webhook runs;
# confirm against the admission plugin ordering in this cluster.
mutate = {
patchesJson6902 = yamlencode([
{
op = "remove"
path = "/spec/priority"
},
{
op = "remove"
path = "/spec/preemptionPolicy"
},
{
op = "add"
path = "/spec/priorityClassName"
value = "gpu-workload"
}
])
}
}
]
}
}
}

View file

@ -13,12 +13,6 @@ variable "kube_config_path" {
default = "~/.kube/config"
}
# Token used by the Vault provider below. Marked sensitive so it never shows
# in plan output; the empty default lets plans run without supplying one.
variable "vault_root_token" {
  type      = string
  default   = ""
  sensitive = true
}
# Kubernetes provider authenticates via the kubeconfig file at
# var.kube_config_path (defaults to ~/.kube/config).
provider "kubernetes" {
config_path = var.kube_config_path
}
@ -31,6 +25,5 @@ provider "helm" {
# Vault provider used for the remaining plan-time vault data sources.
provider "vault" {
address = "https://vault.viktorbarzin.me"
# NOTE(review): the commit message says the static root token is being
# retired in favour of OIDC auth — confirm this line is actually removed.
token = var.vault_root_token
# Use the supplied token directly instead of minting a short-lived child
# token on each run — presumably because a root token has no TTL to inherit.
skip_child_token = true
}