truenas deprecation: migrate all non-immich storage to proxmox NFS

- Migrate 7 backup CronJobs to Proxmox host NFS (192.168.1.127)
  (etcd, mysql, postgresql, nextcloud, redis, vaultwarden, plotting-book)
- Migrate headscale backup, ebook2audiobook, osm_routing to Proxmox NFS
- Migrate servarr (lidarr, readarr, soulseek) NFS refs to Proxmox
- Remove 79 orphaned TrueNAS NFS module declarations from 49 stacks
- Delete stacks/platform/modules/ (27 dead module copies, 65MB)
- Update nfs-truenas StorageClass to point to Proxmox (192.168.1.127)
- Remove iscsi DNS record from config.tfvars
- Fix woodpecker persistence config and alertmanager PV

Only Immich (8 PVCs, ~1.4TB) remains on TrueNAS.
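Concretely, each migrated mount keeps the shared nfs_volume module and only repoints the server. A minimal sketch (the server IP is from this commit; the export path is assumed to stay the same):

```hcl
# Illustrative sketch: an NFS-backed volume module call after the migration.
module "nfs_etcd_backup" {
  source     = "../../../../modules/kubernetes/nfs_volume"
  name       = "infra-etcd-backup"
  namespace  = "default"
  nfs_server = "192.168.1.127"          # Proxmox host NFS (previously the TrueNAS host)
  nfs_path   = "/mnt/main/etcd-backup"  # adjust if the Proxmox export layout differs
}
```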
Viktor Barzin 2026-04-12 14:35:39 +01:00
parent 3246c4d112
commit 82b0f6c4cb
193 changed files with 825 additions and 177172 deletions

View file

@ -1,6 +1,6 @@
# Generated by Terragrunt. Sig: nIlQXj57tbuaRZEa
terraform {
backend "local" {
path = "/woodpecker/src/github.com/ViktorBarzin/infra/state/stacks/platform/terraform.tfstate"
path = "/Users/viktorbarzin/code/infra/state/stacks/platform/terraform.tfstate"
}
}

View file

@ -1,88 +0,0 @@
variable "tls_secret_name" {}
variable "secret_key" {}
variable "postgres_password" {}
variable "tier" { type = string }
variable "redis_host" { type = string }
variable "homepage_token" {
type = string
default = ""
sensitive = true
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.authentik.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_namespace" "authentik" {
metadata {
name = "authentik"
labels = {
tier = var.tier
"resource-governance/custom-quota" = "true"
}
}
}
resource "kubernetes_resource_quota" "authentik" {
metadata {
name = "authentik-quota"
namespace = kubernetes_namespace.authentik.metadata[0].name
}
spec {
hard = {
"requests.cpu" = "16"
"requests.memory" = "16Gi"
"limits.memory" = "96Gi"
pods = "50"
}
}
}
resource "helm_release" "authentik" {
namespace = kubernetes_namespace.authentik.metadata[0].name
create_namespace = true
name = "goauthentik"
repository = "https://charts.goauthentik.io/"
chart = "authentik"
# version = "2025.8.1"
version = "2025.10.3"
atomic = true
timeout = 6000
values = [templatefile("${path.module}/values.yaml", { postgres_password = var.postgres_password, secret_key = var.secret_key, redis_host = var.redis_host })]
}
module "ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.authentik.metadata[0].name
name = "authentik"
service_name = "goauthentik-server"
tls_secret_name = var.tls_secret_name
extra_annotations = {
"gethomepage.dev/enabled" = "true"
"gethomepage.dev/name" = "Authentik"
"gethomepage.dev/description" = "Identity provider"
"gethomepage.dev/icon" = "authentik.png"
"gethomepage.dev/group" = "Identity & Security"
"gethomepage.dev/pod-selector" = ""
"gethomepage.dev/widget.type" = "authentik"
"gethomepage.dev/widget.url" = "http://goauthentik-server.authentik.svc.cluster.local"
"gethomepage.dev/widget.key" = var.homepage_token
}
}
module "ingress-outpost" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.authentik.metadata[0].name
name = "authentik-outpost"
host = "authentik"
service_name = "ak-outpost-authentik-embedded-outpost"
port = 9000
ingress_path = ["/outpost.goauthentik.io"]
tls_secret_name = var.tls_secret_name
}

View file

@ -1,14 +0,0 @@
[databases]
authentik = host=postgresql.dbaas port=5432 dbname=authentik user=authentik password=${password}
[pgbouncer]
listen_addr = 0.0.0.0
listen_port = 6432
auth_type = md5
auth_file = /etc/pgbouncer/userlist.txt
pool_mode = transaction
max_client_conn = 200
default_pool_size = 20
reserve_pool_size = 5
reserve_pool_timeout = 5
ignore_startup_parameters = extra_float_digits

View file

@ -1,140 +0,0 @@
resource "kubernetes_config_map" "pgbouncer_config" {
metadata {
name = "pgbouncer-config"
namespace = "authentik"
}
data = {
"pgbouncer.ini" = templatefile("${path.module}/pgbouncer.ini", { password = var.postgres_password })
}
}
# --- 2 Secret for user credentials ---
resource "kubernetes_secret" "pgbouncer_auth" {
metadata {
name = "pgbouncer-auth"
namespace = "authentik"
}
data = {
"userlist.txt" = templatefile("${path.module}/userlist.txt", { password = var.postgres_password })
}
type = "Opaque"
}
# --- 3 Deployment ---
resource "kubernetes_deployment" "pgbouncer" {
metadata {
name = "pgbouncer"
namespace = "authentik"
labels = {
app = "pgbouncer"
tier = var.tier
}
}
spec {
replicas = 3
selector {
match_labels = {
app = "pgbouncer"
}
}
template {
metadata {
labels = {
app = "pgbouncer"
}
}
spec {
affinity {
pod_anti_affinity {
required_during_scheduling_ignored_during_execution {
label_selector {
match_expressions {
key = "component"
operator = "In"
values = ["server"]
}
}
topology_key = "kubernetes.io/hostname"
}
}
}
container {
name = "pgbouncer"
image = "edoburu/pgbouncer:latest"
image_pull_policy = "IfNotPresent"
port {
container_port = 6432
}
volume_mount {
name = "config"
mount_path = "/etc/pgbouncer/pgbouncer.ini"
sub_path = "pgbouncer.ini"
}
volume_mount {
name = "auth"
mount_path = "/etc/pgbouncer/userlist.txt"
sub_path = "userlist.txt"
}
env {
name = "DATABASES_AUTHENTIK"
value = "host=postgres port=5432 dbname=authentik user=authentik password=${var.postgres_password}"
}
}
volume {
name = "config"
config_map {
name = kubernetes_config_map.pgbouncer_config.metadata[0].name
}
}
volume {
name = "auth"
secret {
secret_name = kubernetes_secret.pgbouncer_auth.metadata[0].name
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
depends_on = [kubernetes_secret.pgbouncer_auth]
}
# --- 4 Service ---
resource "kubernetes_service" "pgbouncer" {
metadata {
name = "pgbouncer"
namespace = "authentik"
}
spec {
selector = {
app = "pgbouncer"
}
port {
port = 6432
target_port = 6432
protocol = "TCP"
}
type = "ClusterIP"
}
}

View file

@ -1 +0,0 @@
"authentik" "${password}"

View file

@ -1,73 +0,0 @@
authentik:
log_level: warning
# log_level: trace
secret_key: "${secret_key}"
# This sends anonymous usage-data, stack traces on errors and
# performance data to authentik.error-reporting.a7k.io, and is fully opt-in
error_reporting:
enabled: true
postgresql:
# host: postgresql.dbaas
host: pgbouncer.authentik
port: 6432
user: authentik
password: ${postgres_password}
redis:
host: ${redis_host}
server:
replicas: 3
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
resources:
requests:
cpu: 100m
memory: 1Gi
limits:
memory: 1Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/component: server
ingress:
enabled: false
# hosts:
# - authentik.viktorbarzin.me
podAnnotations:
diun.enable: true
diun.include_tags: "^202[0-9].[0-9]+.*$" # no need to annotate the worker as it uses the same image
pdb:
enabled: true
minAvailable: 2
global:
addPrometheusAnnotations: true
worker:
replicas: 3
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
resources:
requests:
cpu: 100m
memory: 896Mi
limits:
memory: 896Mi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/component: worker
pdb:
enabled: true
maxUnavailable: 1

View file

@ -1,173 +0,0 @@
# Contents for cloudflare account
variable "cloudflare_api_key" {}
variable "cloudflare_email" {}
variable "cloudflare_proxied_names" { type = list(string) }
variable "cloudflare_non_proxied_names" { type = list(string) }
variable "cloudflare_zone_id" {
description = "Zone ID for your domain"
type = string
}
variable "cloudflare_account_id" {
type = string
sensitive = true
}
variable "cloudflare_tunnel_id" {
type = string
sensitive = true
}
variable "public_ip" {
type = string
}
variable "public_ipv6" {
type = string
description = "Public IPv6 address for AAAA records (from HE tunnel broker)"
}
terraform {
required_providers {
cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 4"
}
}
}
provider "cloudflare" {
api_key = var.cloudflare_api_key # I gave up on getting the permissions on the token...
email = var.cloudflare_email
}
locals {
cloudflare_proxied_names_map = {
for h in var.cloudflare_proxied_names :
h => h
}
cloudflare_non_proxied_names_map = {
for h in var.cloudflare_non_proxied_names :
h => h
}
}
resource "cloudflare_zero_trust_tunnel_cloudflared_config" "sof" {
account_id = var.cloudflare_account_id
tunnel_id = var.cloudflare_tunnel_id
config {
warp_routing {
enabled = true
}
dynamic "ingress_rule" {
for_each = toset(var.cloudflare_proxied_names)
content {
hostname = ingress_rule.value == "viktorbarzin.me" ? ingress_rule.value : "${ingress_rule.value}.viktorbarzin.me"
path = "/"
service = "https://10.0.20.200:443"
origin_request {
no_tls_verify = true
}
}
}
ingress_rule {
service = "http_status:404"
}
}
}
resource "cloudflare_record" "dns_record" {
# count = length(var.cloudflare_proxied_names)
# name = var.cloudflare_proxied_names[count.index]
for_each = local.cloudflare_proxied_names_map
name = each.key
content = "${var.cloudflare_tunnel_id}.cfargotunnel.com"
proxied = true
ttl = 1
type = "CNAME"
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "non_proxied_dns_record" {
# count = length(var.cloudflare_non_proxied_names)
# name = var.cloudflare_non_proxied_names[count.index]
for_each = local.cloudflare_non_proxied_names_map
name = each.key
# content = var.non_proxied_names[count.index].ip
content = var.public_ip
proxied = false
ttl = 1
type = "A"
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "non_proxied_dns_record_ipv6" {
for_each = local.cloudflare_non_proxied_names_map
name = each.key
content = var.public_ipv6
proxied = false
ttl = 1
type = "AAAA"
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "mail" {
content = "mail.viktorbarzin.me"
name = "viktorbarzin.me"
proxied = false
ttl = 1
type = "MX"
priority = 1
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "mail_domainkey" {
content = "\"k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDIDLB8mhAHNqs1s6GeZMQHOxWweoNKIrqo5tqRM3yFilgfPUX34aTIXNZg9xAmlK+2S/xXO1ymt127ZGMjnoFKOEP8/uZ54iHTCnioHaPZWMfJ7o6TYIXjr+9ShKfoJxZLv7lHJ2wKQK3yOw4lg4cvja5nxQ6fNoGRwo+mQ/mgJQIDAQAB\""
name = "s1._domainkey.viktorbarzin.me"
proxied = false
ttl = 1
type = "TXT"
priority = 1
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "mail_spf" {
content = "\"v=spf1 include:mailgun.org ~all\""
name = "viktorbarzin.me"
proxied = false
ttl = 1
type = "TXT"
priority = 1
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "mail_dmarc" {
content = "\"v=DMARC1; p=quarantine; pct=100; fo=1; ri=3600; sp=quarantine; adkim=r; aspf=r; rua=mailto:e21c0ff8@dmarc.mailgun.org,mailto:adb84997@inbox.ondmarc.com; ruf=mailto:e21c0ff8@dmarc.mailgun.org,mailto:adb84997@inbox.ondmarc.com,mailto:postmaster@viktorbarzin.me;\""
name = "_dmarc.viktorbarzin.me"
proxied = false
ttl = 1
type = "TXT"
priority = 1
zone_id = var.cloudflare_zone_id
}
resource "cloudflare_record" "keyserver" {
content = "130.162.165.220" # Oracle VPS
name = "keyserver.viktorbarzin.me"
proxied = false
ttl = 3600
type = "A"
priority = 1
zone_id = var.cloudflare_zone_id
}
# Enable HTTP/3 (QUIC) for Cloudflare-proxied domains
resource "cloudflare_zone_settings_override" "http3" {
zone_id = var.cloudflare_zone_id
settings {
http3 = "on"
}
}

View file

@ -1,134 +0,0 @@
# Contents for cloudflare tunnel
variable "tls_secret_name" {}
variable "cloudflare_tunnel_token" {}
resource "kubernetes_namespace" "cloudflared" {
metadata {
name = "cloudflared"
labels = {
tier = var.tier
}
}
}
variable "tier" { type = string }
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.cloudflared.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_deployment" "cloudflared" {
metadata {
name = "cloudflared"
namespace = kubernetes_namespace.cloudflared.metadata[0].name
labels = {
app = "cloudflared"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 3
strategy {
type = "RollingUpdate"
}
selector {
match_labels = {
app = "cloudflared"
}
}
template {
metadata {
labels = {
app = "cloudflared"
}
annotations = {
"diun.enable" = "true"
"diun.include_tags" = "^\\d{4}\\.\\d+\\.\\d+$"
}
}
spec {
topology_spread_constraint {
max_skew = 1
topology_key = "kubernetes.io/hostname"
when_unsatisfiable = "ScheduleAnyway"
label_selector {
match_labels = {
app = "cloudflared"
}
}
}
container {
# image = "wisdomsky/cloudflared-web:latest"
image = "cloudflare/cloudflared:2026.3.0"
name = "cloudflared"
command = ["cloudflared", "tunnel", "run"]
env {
name = "TUNNEL_TOKEN"
value = var.cloudflare_tunnel_token
}
port {
container_port = 14333
}
resources {
requests = {
cpu = "15m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
}
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_pod_disruption_budget_v1" "cloudflared" {
metadata {
name = "cloudflared"
namespace = kubernetes_namespace.cloudflared.metadata[0].name
}
spec {
max_unavailable = "1"
selector {
match_labels = {
app = "cloudflared"
}
}
}
}
resource "kubernetes_service" "cloudflared" {
metadata {
name = "cloudflared"
namespace = kubernetes_namespace.cloudflared.metadata[0].name
labels = {
"app" = "cloudflared"
}
}
spec {
selector = {
app = "cloudflared"
}
port {
name = "http"
target_port = 14333
port = 80
protocol = "TCP"
}
}
}

View file

@ -1,53 +0,0 @@
variable "tier" { type = string }
# -----------------------------------------------------------------------------
# Namespace
# -----------------------------------------------------------------------------
resource "kubernetes_namespace" "cnpg_system" {
metadata {
name = "cnpg-system"
labels = {
tier = var.tier
}
}
}
# -----------------------------------------------------------------------------
# CloudNativePG Operator manages PostgreSQL clusters via CRDs
# https://cloudnative-pg.io/
# -----------------------------------------------------------------------------
resource "helm_release" "cnpg" {
namespace = kubernetes_namespace.cnpg_system.metadata[0].name
create_namespace = false
name = "cnpg"
atomic = true
timeout = 300
repository = "https://cloudnative-pg.github.io/charts"
chart = "cloudnative-pg"
version = "0.27.1"
values = [yamlencode({
crds = {
create = true
}
replicaCount = 1
resources = {
requests = {
cpu = "100m"
memory = "256Mi"
}
limits = {
memory = "256Mi"
}
}
})]
}
# NOTE: local-path-provisioner is already installed in the cluster
# (via cloud-init template) with StorageClass "local-path" (default).
# ReclaimPolicy is "Delete" for CNPG clusters; set
# .spec.storage.pvcTemplate.storageClassName = "local-path" in the
# Cluster CR. CNPG handles PVC lifecycle independently.
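For reference, a minimal sketch of such a Cluster CR managed from Terraform (the name, namespace, instance count, and size are placeholders, not values from this repo):

```hcl
# Illustrative sketch: a CNPG Cluster pinned to the local-path StorageClass.
resource "kubernetes_manifest" "example_pg_cluster" {
  manifest = {
    apiVersion = "postgresql.cnpg.io/v1"
    kind       = "Cluster"
    metadata = {
      name      = "example-pg" # placeholder name
      namespace = "dbaas"      # placeholder namespace
    }
    spec = {
      instances = 3
      storage = {
        pvcTemplate = {
          storageClassName = "local-path"
          accessModes      = ["ReadWriteOnce"]
          resources        = { requests = { storage = "10Gi" } }
        }
      }
    }
  }
}
```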

View file

@ -1,44 +0,0 @@
controller:
extraVolumes:
- name: crowdsec-bouncer-plugin
emptyDir: {}
extraInitContainers:
- name: init-clone-crowdsec-bouncer
image: crowdsecurity/lua-bouncer-plugin
imagePullPolicy: IfNotPresent
env:
- name: API_URL
value: "http://crowdsec-service.crowdsec.svc.cluster.local:8080" # crowdsec lapi service-name
- name: API_KEY
value: "<API KEY>" # generated with `cscli bouncers add -n <bouncer_name>
- name: BOUNCER_CONFIG
value: "/crowdsec/crowdsec-bouncer.conf"
- name: CAPTCHA_PROVIDER
value: "recaptcha" # valid providers are recaptcha, hcaptcha, turnstile
- name: SECRET_KEY
value: "<your-captcha-secret-key>" # If you want captcha support otherwise remove this ENV VAR
- name: SITE_KEY
value: "<your-captcha-site-key>" # If you want captcha support otherwise remove this ENV VAR
- name: BAN_TEMPLATE_PATH
value: /etc/nginx/lua/plugins/crowdsec/templates/ban.html
- name: CAPTCHA_TEMPLATE_PATH
value: /etc/nginx/lua/plugins/crowdsec/templates/captcha.html
command:
[
"sh",
"-c",
"sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/",
]
volumeMounts:
- name: crowdsec-bouncer-plugin
mountPath: /lua_plugins
extraVolumeMounts:
- name: crowdsec-bouncer-plugin
mountPath: /etc/nginx/lua/plugins/crowdsec
subPath: crowdsec
config:
plugins: "crowdsec"
lua-shared-dicts: "crowdsec_cache: 50m"
server-snippet: |
lua_ssl_trusted_certificate "/etc/ssl/certs/ca-certificates.crt"; # If you want captcha support otherwise remove this line
resolver local=on ipv6=off;

View file

@ -1,376 +0,0 @@
variable "tls_secret_name" {}
variable "homepage_username" {}
variable "homepage_password" {}
variable "db_password" {}
variable "enroll_key" {}
variable "crowdsec_dash_api_key" {
type = string
sensitive = true
}
variable "crowdsec_dash_machine_id" { type = string } # used for web dash
variable "crowdsec_dash_machine_password" {
type = string
sensitive = true
}
variable "tier" { type = string }
variable "slack_webhook_url" { type = string }
variable "mysql_host" { type = string }
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_namespace" "crowdsec" {
metadata {
name = "crowdsec"
labels = {
tier = var.tier
"resource-governance/custom-quota" = "true"
}
}
}
resource "kubernetes_config_map" "crowdsec_custom_scenarios" {
metadata {
name = "crowdsec-custom-scenarios"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
labels = {
"app.kubernetes.io/name" = "crowdsec"
}
}
data = {
"http-403-abuse.yaml" = <<-YAML
type: leaky
name: crowdsecurity/http-403-abuse
description: "Detect IPs triggering too many HTTP 403s in NGINX ingress logs"
filter: "evt.Meta.log_type == 'http_access-log' && evt.Parsed.status == '403'"
groupby: "evt.Meta.source_ip"
leakspeed: "2s"
capacity: 10
blackhole: 5m
labels:
service: http
behavior: abusive_403
remediation: true
YAML
"http-429-abuse.yaml" : <<-YAML
type: leaky
name: crowdsecurity/http-429-abuse
description: "Detect IPs repeatedly triggering rate-limit (HTTP 429)"
filter: "evt.Meta.log_type == 'http_access-log' && evt.Parsed.status == '429'"
groupby: "evt.Meta.source_ip"
leakspeed: "10s"
capacity: 5
blackhole: 1m
labels:
service: http
behavior: rate_limit_abuse
remediation: true
YAML
}
}
# Whitelist for trusted IPs that should never be blocked
resource "kubernetes_config_map" "crowdsec_whitelist" {
metadata {
name = "crowdsec-whitelist"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
labels = {
"app.kubernetes.io/name" = "crowdsec"
}
}
data = {
"whitelist.yaml" = <<-YAML
name: crowdsecurity/whitelist-trusted-ips
description: "Whitelist for trusted IPs that should never be blocked"
whitelist:
reason: "Trusted IP - never block"
ip:
- "176.12.22.76"
YAML
}
}
resource "helm_release" "crowdsec" {
namespace = kubernetes_namespace.crowdsec.metadata[0].name
create_namespace = true
name = "crowdsec"
atomic = true
version = "0.21.0"
repository = "https://crowdsecurity.github.io/helm-charts"
chart = "crowdsec"
values = [templatefile("${path.module}/values.yaml", { homepage_username = var.homepage_username, homepage_password = var.homepage_password, DB_PASSWORD = var.db_password, ENROLL_KEY = var.enroll_key, SLACK_WEBHOOK_URL = var.slack_webhook_url, mysql_host = var.mysql_host })]
timeout = 900
wait = true
wait_for_jobs = true
}
# Deployment for my custom dashboard that helps me unblock myself when I blocklist myself
resource "kubernetes_deployment" "crowdsec-web" {
metadata {
name = "crowdsec-web"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
labels = {
app = "crowdsec_web"
"kubernetes.io/cluster-service" = "true"
tier = var.tier
}
}
spec {
replicas = 1
strategy {
type = "RollingUpdate"
}
selector {
match_labels = {
app = "crowdsec_web"
}
}
template {
metadata {
labels = {
app = "crowdsec_web"
"kubernetes.io/cluster-service" = "true"
}
}
spec {
priority_class_name = "tier-1-cluster"
container {
name = "crowdsec-web"
image = "viktorbarzin/crowdsec_web"
env {
name = "CS_API_URL"
value = "http://crowdsec-service.crowdsec.svc.cluster.local:8080/v1"
}
env {
name = "CS_API_KEY"
value = var.crowdsec_dash_api_key
}
env {
name = "CS_MACHINE_ID"
value = var.crowdsec_dash_machine_id
}
env {
name = "CS_MACHINE_PASSWORD"
value = var.crowdsec_dash_machine_password
}
port {
name = "http"
container_port = 8000
protocol = "TCP"
}
resources {
requests = {
cpu = "15m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
}
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "crowdsec-web" {
metadata {
name = "crowdsec-web"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
labels = {
"app" = "crowdsec_web"
}
}
spec {
selector = {
app = "crowdsec_web"
}
port {
port = "80"
target_port = "8000"
}
}
}
module "ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
name = "crowdsec-web"
protected = true
tls_secret_name = var.tls_secret_name
exclude_crowdsec = true
rybbit_site_id = "d09137795ccc"
}
# CronJob to import public blocklists into CrowdSec
# https://github.com/wolffcatskyy/crowdsec-blocklist-import
# Uses kubectl exec to run in an existing CrowdSec agent pod that's already registered
resource "kubernetes_cron_job_v1" "crowdsec_blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
labels = {
app = "crowdsec-blocklist-import"
tier = var.tier
}
}
spec {
# Run daily at 4 AM
schedule = "0 4 * * *"
timezone = "Europe/London"
concurrency_policy = "Forbid"
successful_jobs_history_limit = 3
failed_jobs_history_limit = 3
job_template {
metadata {
labels = {
app = "crowdsec-blocklist-import"
}
}
spec {
backoff_limit = 3
template {
metadata {
labels = {
app = "crowdsec-blocklist-import"
}
}
spec {
service_account_name = kubernetes_service_account.blocklist_import.metadata[0].name
restart_policy = "OnFailure"
container {
name = "blocklist-import"
image = "bitnami/kubectl:latest"
command = ["/bin/bash", "-c"]
args = [
<<-EOF
set -e
echo "Finding CrowdSec agent pod..."
AGENT_POD=$(kubectl get pods -n crowdsec -l k8s-app=crowdsec,type=agent -o jsonpath='{.items[0].metadata.name}')
if [ -z "$AGENT_POD" ]; then
echo "ERROR: Could not find CrowdSec agent pod"
exit 1
fi
echo "Using agent pod: $AGENT_POD"
# Download the import script
echo "Downloading blocklist import script..."
curl -fsSL -o /tmp/import.sh \
https://raw.githubusercontent.com/wolffcatskyy/crowdsec-blocklist-import/main/import.sh
chmod +x /tmp/import.sh
# Copy script to agent pod and execute
echo "Copying script to agent pod and executing..."
kubectl cp /tmp/import.sh crowdsec/$AGENT_POD:/tmp/import.sh
kubectl exec -n crowdsec "$AGENT_POD" -- /bin/bash -c '
set -e
# Run with native mode since we are inside the CrowdSec container
export MODE=native
export DECISION_DURATION=24h
export FETCH_TIMEOUT=60
export LOG_LEVEL=INFO
/tmp/import.sh
# Cleanup
rm -f /tmp/import.sh
'
echo "Blocklist import completed successfully!"
EOF
]
}
}
}
}
}
}
}
# Service account for the blocklist import job (needs kubectl exec permissions)
resource "kubernetes_service_account" "blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
}
resource "kubernetes_role" "blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
rule {
api_groups = [""]
resources = ["pods"]
verbs = ["get", "list"]
}
rule {
api_groups = [""]
resources = ["pods/exec"]
verbs = ["create"]
}
}
resource "kubernetes_role_binding" "blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = kubernetes_role.blocklist_import.metadata[0].name
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.blocklist_import.metadata[0].name
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
}
# Custom ResourceQuota for CrowdSec: it needs more than the default 1-cluster quota
# because it runs DaemonSet agents (1 per worker node) + 3 LAPI replicas + web UI
resource "kubernetes_resource_quota" "crowdsec" {
metadata {
name = "crowdsec-quota"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
spec {
hard = {
"requests.cpu" = "4"
"requests.memory" = "8Gi"
"limits.memory" = "16Gi"
pods = "30"
}
}
}

View file

@ -1,229 +0,0 @@
# values from - https://github.com/crowdsecurity/helm-charts/blob/main/charts/crowdsec/values.yaml
container_runtime: containerd
agent:
resources:
requests:
cpu: 25m
memory: 64Mi
limits:
memory: 512Mi
priorityClassName: "tier-1-cluster"
# Specify each pod whose logs you want to process (pods present on the node)
acquisition:
# The namespace where the pod is located
- namespace: traefik
# The pod name
podName: traefik-*
# as in crowdsec configuration, we need to specify the program name so the parser will match and parse logs
program: traefik
# Those are ENV variables
env:
# As it's a test, we don't want to share signals with CrowdSec so disable the Online API.
# - name: DISABLE_ONLINE_API
# value: "true"
# As we are running Traefik, we want to install the Traefik collection
- name: COLLECTIONS
value: "crowdsecurity/traefik crowdsecurity/base-http-scenarios crowdsecurity/http-cve"
- name: SCENARIOS
value: ""
# value: "crowdsecurity/http-crawl-aggressive"
# Mount custom scenarios into /etc/crowdsec/scenarios
extraVolumeMounts:
- name: custom-scenarios
mountPath: /etc/crowdsec/scenarios/http-403-abuse.yaml
subPath: "http-403-abuse.yaml"
readOnly: true
- name: custom-scenarios
mountPath: /etc/crowdsec/scenarios/http-429-abuse.yaml
subPath: "http-429-abuse.yaml"
readOnly: true
- name: whitelist
mountPath: /etc/crowdsec/parsers/s02-enrich/whitelist.yaml
subPath: "whitelist.yaml"
readOnly: true
extraVolumes:
- name: custom-scenarios
configMap:
name: crowdsec-custom-scenarios
- name: whitelist
configMap:
name: crowdsec-whitelist
podAnnotations:
dependency.kyverno.io/wait-for: "mysql.dbaas:3306"
lapi:
resources:
requests:
cpu: 25m
memory: 128Mi
limits:
memory: 1Gi
startupProbe:
httpGet:
path: /health
port: 8080
failureThreshold: 30
periodSeconds: 10
priorityClassName: "tier-1-cluster"
replicas: 3
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/name: crowdsec
type: lapi
pdb:
enabled: true
maxUnavailable: 1
extraSecrets:
dbPassword: "${DB_PASSWORD}"
storeCAPICredentialsInSecret: true
persistentVolume:
config:
enabled: false
data:
enabled: false
env:
- name: ENROLL_KEY
value: "${ENROLL_KEY}"
- name: ENROLL_INSTANCE_NAME
value: "k8s-cluster"
- name: ENROLL_TAGS
value: "k8s linux"
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: crowdsec-lapi-secrets
key: dbPassword
# As it's a test, we don't want to share signals with CrowdSec, so disable the Online API.
# - name: DISABLE_ONLINE_API
# value: "true"
dashboard:
enabled: true
env:
- name: MB_DB_TYPE
value: "mysql"
- name: MB_DB_DBNAME
value: crowdsec-metabase
- name: MB_DB_USER
value: "crowdsec"
- name: MB_DB_PASS
value: "${DB_PASSWORD}"
- name: MB_DB_HOST
value: "${mysql_host}"
- name: MB_EMAIL_SMTP_USERNAME
value: "info@viktorbarzin.me"
- name: MB_EMAIL_FROM_ADDRESS
value: "info@viktorbarzin.me"
- name: MB_EMAIL_SMTP_HOST
value: "mailserver.mailserver.svc.cluster.local"
- name: MB_EMAIL_SMTP_PASSWORD
value: "" # Ignore for now as it's unclear what notifications we can get
- name: MB_EMAIL_SMTP_PORT
value: "587"
- name: MB_EMAIL_SMTP_SECURITY
value: "starttls"
ingress:
enabled: true
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
#nginx.ingress.kubernetes.io/auth-url: "https://oauth2.viktorbarzin.me/oauth2/auth"
nginx.ingress.kubernetes.io/auth-url: "http://ak-outpost-authentik-embedded-outpost.authentik.svc.cluster.local:9000/outpost.goauthentik.io/auth/nginx"
# nginx.ingress.kubernetes.io/auth-signin: "https://oauth2.viktorbarzin.me/oauth2/start?rd=/redirect/$http_host$escaped_request_uri"
nginx.ingress.kubernetes.io/auth-signin: "https://authentik.viktorbarzin.me/outpost.goauthentik.io/start?rd=$scheme%3A%2F%2F$host$escaped_request_uri"
nginx.ingress.kubernetes.io/auth-response-headers: "Set-Cookie,X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid"
nginx.ingress.kubernetes.io/auth-snippet: "proxy_set_header X-Forwarded-Host $http_host;"
gethomepage.dev/enabled: "true"
gethomepage.dev/description: "Web Application Firewall"
gethomepage.dev/icon: "crowdsec.png"
gethomepage.dev/name: "CrowdSec"
gethomepage.dev/group: "Identity & Security"
gethomepage.dev/widget.type: "crowdsec"
gethomepage.dev/widget.url: "http://crowdsec-service.crowdsec.svc.cluster.local:8080"
gethomepage.dev/widget.username: "${homepage_username}"
gethomepage.dev/widget.password: "${homepage_password}"
gethomepage.dev/pod-selector: ""
ingressClassName: "nginx"
host: "crowdsec.viktorbarzin.me"
tls:
- hosts:
- crowdsec.viktorbarzin.me
secretName: "tls-secret"
metrics:
enabled: true
strategy:
type: RollingUpdate
config:
# Custom profiles: captcha for rate limiting, ban for attacks
profiles.yaml: |
# Captcha for rate limiting and 403 abuse (user can unblock themselves)
name: captcha_remediation
filters:
- Alert.Remediation == true && Alert.GetScope() == "Ip" && Alert.GetScenario() in ["crowdsecurity/http-429-abuse", "crowdsecurity/http-403-abuse", "crowdsecurity/http-crawl-non_statics", "crowdsecurity/http-sensitive-files"]
decisions:
- type: captcha
duration: 4h
notifications:
- slack_alerts
on_success: break
---
# Default: Ban for serious attacks (CVE exploits, scanners, brute force)
name: default_ip_remediation
filters:
- Alert.Remediation == true && Alert.GetScope() == "Ip"
decisions:
- type: ban
duration: 4h
notifications:
- slack_alerts
on_success: break
---
name: default_range_remediation
filters:
- Alert.Remediation == true && Alert.GetScope() == "Range"
decisions:
- type: ban
duration: 4h
notifications:
- slack_alerts
on_success: break
config.yaml.local: |
db_config:
type: mysql
user: crowdsec
password: ${DB_PASSWORD}
db_name: crowdsec
host: ${mysql_host}
port: 3306
api:
server:
auto_registration: # Activate if not using TLS for authentication
enabled: true
token: "$${REGISTRATION_TOKEN}" # /!\ do not change
allowed_ranges: # /!\ adapt to the pod IP ranges used by your cluster
- "127.0.0.1/32"
- "192.168.0.0/16"
- "10.0.0.0/8"
- "172.16.0.0/12"
notifications:
slack.yaml: |
type: slack
name: slack_alerts
log_level: info
format: |
:rotating_light: *CrowdSec Alert*
{{range .}}
*Scenario:* {{.Alert.Scenario}}
*Source IP:* {{.Alert.Source.IP}} ({{.Alert.Source.Cn}})
*Decisions:*
{{range .Alert.Decisions}} - {{.Type}} for {{.Duration}} (scope: {{.Scope}}, value: {{.Value}})
{{end}}
{{end}}
webhook: ${SLACK_WEBHOOK_URL}

View file

@ -1,16 +0,0 @@
tls:
useSelfSigned: true
credentials:
root:
password: ${root_password}
user: root
serverInstances: 1
podSpec:
containers:
- name: mysql
resources:
requests:
memory: "1024Mi" # adapt to your needs
cpu: "100m" # adapt to your needs
limits:
memory: "2048Mi" # adapt to your needs

View file

@ -1,30 +0,0 @@
apiVersion: mysql.presslabs.org/v1alpha1
kind: MysqlCluster
metadata:
name: mysql-cluster
namespace: dbaas
spec:
mysqlVersion: "5.7"
replicas: 1
secretName: cluster-secret
mysqlConf:
# read_only: 0 # mysql forms a single transaction for each sql statement, autocommit for each statement
# automatic_sp_privileges: "ON" # automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine
# auto_generate_certs: "ON" # Auto Generation of Certificate
# auto_increment_increment: 1 # Auto Incrementing value from +1
# auto_increment_offset: 1 # Auto Increment Offset
# binlog-format: "STATEMENT" # valid options are ROW (slow, safe), STATEMENT (fast, unsafe), MIXED (combination of both)
# wait_timeout: 31536000 # default 28800; number of seconds the server waits for activity on a non-interactive connection before closing it. If you see "MySQL server has gone away" errors, tweak this value accordingly
# interactive_timeout: 28800 # The number of seconds the server waits for activity on an interactive connection before closing it.
# max_allowed_packet: "512M" # Maximum size of MYSQL Network protocol packet that the server can create or read 4MB, 8MB, 16MB, 32MB
# max-binlog-size: 1073741824 # binary logs contains the events that describe database changes, this parameter describe size for the bin_log file.
# log_output: "TABLE" # Format in which the logout will be dumped
# master-info-repository: "TABLE" # Format in which the master info will be dumped
# relay_log_info_repository: "TABLE" # Format in which the relay info will be dumped
volumeSpec:
persistentVolumeClaim:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

File diff suppressed because it is too large.

View file

@ -1,14 +0,0 @@
---
orchestrator:
# persistence:
# enabled: false
ingress:
enable: false
hosts:
- host: db.viktorbarzin.me
paths:
- path: /
tls:
- secretName: ${secretName}
hosts:
- db.viktorbarzin.me

View file

@ -1,30 +0,0 @@
# Use the PostGIS image as the base
FROM pgvector/pgvector:0.8.0-pg16 as binary
FROM postgis/postgis:16-master
COPY --from=binary /pgvecto-rs-binary-release.deb /tmp/vectors.deb
RUN apt-get install -y /tmp/vectors.deb && rm -f /tmp/vectors.deb
# Install necessary packages
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
libpq-dev \
wget \
git \
postgresql-server-dev-16 \
postgresql-16-pgvector \
# Clean up to reduce layer size
&& rm -rf /var/lib/apt/lists/* \
&& cd /tmp \
&& git clone --branch v0.8.0 https://github.com/pgvector/pgvector.git \
&& cd pgvector \
&& make \
&& make install \
# Clean up unnecessary files
&& cd - \
&& apt-get purge -y --auto-remove build-essential postgresql-server-dev-16 libpq-dev wget git \
&& rm -rf /tmp/pgvector
# Copy initialization scripts
#COPY ./docker-entrypoint-initdb.d/ /docker-entrypoint-initdb.d/
CMD ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", "search_path=\"$user\", public, vectors", "-c", "logging_collector=on"]

View file

@ -1,9 +0,0 @@
# terraform {
# required_providers {
# kubectl = {
# source = "gavinbunney/kubectl"
# version = ">= 1.10.0"
# }
# }
# required_version = ">= 0.13"
# }

View file

@ -1,324 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "headscale_config" {}
variable "headscale_acl" {}
variable "nfs_server" { type = string }
variable "homepage_token" {
type = string
default = ""
sensitive = true
}
resource "kubernetes_namespace" "headscale" {
metadata {
name = "headscale"
labels = {
tier = var.tier
}
}
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.headscale.metadata[0].name
tls_secret_name = var.tls_secret_name
}
module "nfs_data" {
source = "../../../../modules/kubernetes/nfs_volume"
name = "headscale-data"
namespace = kubernetes_namespace.headscale.metadata[0].name
nfs_server = var.nfs_server
nfs_path = "/mnt/main/headscale"
}
resource "kubernetes_deployment" "headscale" {
metadata {
name = "headscale"
namespace = kubernetes_namespace.headscale.metadata[0].name
labels = {
app = "headscale"
tier = var.tier
# scared to try; non-http traffic will probably fail
# "istio-injection" : "enabled"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
selector {
match_labels = {
app = "headscale"
}
}
template {
metadata {
labels = {
app = "headscale"
}
annotations = {
"diun.enable" = "true"
"diun.include_tags" = "^\\d+(?:\\.\\d+)?(?:\\.\\d+)?$"
}
}
spec {
container {
image = "headscale/headscale:0.28.0"
# image = "headscale/headscale:0.28.0-debug" # -debug is for debug images
name = "headscale"
command = ["headscale", "serve"]
resources {
requests = {
cpu = "50m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
}
}
port {
container_port = 8080
}
port {
container_port = 9090
}
port {
container_port = 41641
}
liveness_probe {
http_get {
path = "/health"
port = 8080
}
initial_delay_seconds = 15
period_seconds = 30
timeout_seconds = 5
failure_threshold = 5
}
readiness_probe {
http_get {
path = "/health"
port = 8080
}
initial_delay_seconds = 5
period_seconds = 30
timeout_seconds = 5
failure_threshold = 3
}
volume_mount {
name = "config-volume"
mount_path = "/etc/headscale"
}
volume_mount {
mount_path = "/mnt"
name = "nfs-config"
}
}
volume {
name = "config-volume"
config_map {
name = "headscale-config"
items {
key = "config.yaml"
path = "config.yaml"
}
items {
key = "acl.yaml"
path = "acl.yaml"
}
}
}
volume {
name = "nfs-config"
persistent_volume_claim {
claim_name = module.nfs_data.claim_name
}
}
# container {
# image = "simcu/headscale-ui:0.1.4"
# name = "headscale-ui"
# port {
# container_port = 80
# }
# }
container {
image = "ghcr.io/gurucomputing/headscale-ui:latest"
# image = "ghcr.io/tale/headplane:0.3.2"
name = "headscale-ui"
resources {
requests = {
cpu = "25m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
}
}
port {
container_port = 8081
# container_port = 3000
}
env {
name = "HTTP_PORT"
value = "8081"
}
# env {
# name = "HTTPS_PORT"
# value = "8082"
# }
env {
name = "HEADSCALE_URL"
value = "http://localhost:8080"
}
env {
name = "COOKIE_SECRET"
value = "kekekekke"
}
env {
name = "ROOT_API_KEY"
value = "kekekekeke"
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "headscale" {
metadata {
name = "headscale"
namespace = kubernetes_namespace.headscale.metadata[0].name
labels = {
"app" = "headscale"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/port" = "9090"
}
# annotations = {
# "metallb.universe.tf/allow-shared-ip" : "shared"
# }
}
spec {
# type = "LoadBalancer"
# external_traffic_policy = "Cluster"
selector = {
app = "headscale"
}
port {
name = "headscale"
port = "8080"
protocol = "TCP"
}
port {
name = "headscale-ui"
port = "80"
target_port = 8081
# target_port = 3000
protocol = "TCP"
}
port {
name = "metrics"
port = "9090"
protocol = "TCP"
}
}
}
module "ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.headscale.metadata[0].name
name = "headscale"
port = 8080
tls_secret_name = var.tls_secret_name
extra_annotations = {
"gethomepage.dev/enabled" = "true"
"gethomepage.dev/name" = "Headscale"
"gethomepage.dev/description" = "VPN mesh network"
"gethomepage.dev/icon" = "headscale.png"
"gethomepage.dev/group" = "Identity & Security"
"gethomepage.dev/pod-selector" = ""
}
}
module "ingress-ui" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.headscale.metadata[0].name
name = "headscale-ui"
host = "headscale"
service_name = "headscale"
port = 8081
ingress_path = ["/web"]
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_service" "headscale-server" {
metadata {
name = "headscale-server"
namespace = kubernetes_namespace.headscale.metadata[0].name
labels = {
"app" = "headscale"
}
annotations = {
"metallb.io/loadBalancerIPs" = "10.0.20.200"
"metallb.io/allow-shared-ip" = "shared"
}
}
spec {
type = "LoadBalancer"
external_traffic_policy = "Cluster"
selector = {
app = "headscale"
}
# port {
# name = "headscale-tcp"
# port = "41641"
# protocol = "TCP"
# }
port {
name = "headscale-udp"
port = "41641"
protocol = "UDP"
}
}
}
resource "kubernetes_config_map" "headscale-config" {
metadata {
name = "headscale-config"
namespace = kubernetes_namespace.headscale.metadata[0].name
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"config.yaml" = var.headscale_config
"acl.yaml" = var.headscale_acl
}
}

View file

@ -1,274 +0,0 @@
# Module to run some infra-specific things like updating the public ip
variable "git_user" {}
variable "git_token" {}
variable "technitium_username" {}
variable "technitium_password" {}
variable "nfs_server" { type = string }
# DISABLED WHILST USING CLOUDFLARE NS
# resource "kubernetes_cron_job_v1" "update-public-ip" {
# metadata {
# name = "update-public-ip"
# namespace = "default"
# }
# spec {
# schedule = "*/5 * * * *"
# successful_jobs_history_limit = 1
# failed_jobs_history_limit = 1
# concurrency_policy = "Forbid"
# job_template {
# metadata {
# name = "update-public-ip"
# }
# spec {
# template {
# metadata {
# name = "update-public-ip"
# }
# spec {
# priority_class_name = "system-cluster-critical"
# container {
# name = "update-public-ip"
# image = "viktorbarzin/infra"
# command = ["./infra_cli"]
# args = ["-use-case", "update-public-ip"]
# env {
# name = "GIT_USER"
# value = var.git_user
# }
# env {
# name = "GIT_TOKEN"
# value = var.git_token
# }
# env {
# name = "TECHNITIUM_USERNAME"
# value = var.technitium_username
# }
# env {
# name = "TECHNITIUM_PASSWORD"
# value = var.technitium_password
# }
# }
# restart_policy = "Never"
# # service_account_name = "descheduler-sa"
# # volume {
# # name = "policy-volume"
# # config_map {
# # name = "policy-configmap"
# # }
# # }
# }
# }
# }
# }
# }
# }
module "nfs_etcd_backup" {
source = "../../../../modules/kubernetes/nfs_volume"
name = "infra-etcd-backup"
namespace = "default"
nfs_server = var.nfs_server
nfs_path = "/mnt/main/etcd-backup"
}
# # backup etcd
resource "kubernetes_cron_job_v1" "backup-etcd" {
metadata {
name = "backup-etcd"
namespace = "default"
}
spec {
schedule = "0 0 * * *"
successful_jobs_history_limit = 1
failed_jobs_history_limit = 1
concurrency_policy = "Forbid"
job_template {
metadata {
name = "backup-etcd"
}
spec {
template {
metadata {
name = "backup-etcd"
}
spec {
node_name = "k8s-master"
priority_class_name = "system-cluster-critical"
host_network = true
container {
name = "backup-etcd"
image = "registry.k8s.io/etcd:3.5.21-0"
command = ["/bin/sh", "-c"]
args = ["ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /backup/etcd-snapshot-$(date +%Y%m%d-%H%M%S).db"]
env {
name = "ETCDCTL_API"
value = "3"
}
volume_mount {
mount_path = "/backup"
name = "backup"
}
volume_mount {
mount_path = "/etc/kubernetes/pki/etcd"
name = "etcd-certs"
read_only = true
}
}
container {
name = "backup-purge"
image = "busybox:1.31.1"
command = ["/bin/sh"]
args = ["-c", "find /backup -type f -mtime +30 -name '*.db' -exec rm -- '{}' \\;"]
volume_mount {
mount_path = "/backup"
name = "backup"
}
}
volume {
name = "backup"
persistent_volume_claim {
claim_name = module.nfs_etcd_backup.claim_name
}
}
volume {
name = "etcd-certs"
host_path {
path = "/etc/kubernetes/pki/etcd"
type = "DirectoryOrCreate"
}
}
restart_policy = "Never"
}
}
}
}
}
}
# Weekly etcd defragmentation prevents fragmentation buildup that causes slow requests
resource "kubernetes_cron_job_v1" "defrag-etcd" {
metadata {
name = "defrag-etcd"
namespace = "default"
}
spec {
schedule = "0 3 * * 0"
successful_jobs_history_limit = 1
failed_jobs_history_limit = 1
concurrency_policy = "Forbid"
job_template {
metadata {
name = "defrag-etcd"
}
spec {
template {
metadata {
name = "defrag-etcd"
}
spec {
node_name = "k8s-master"
priority_class_name = "system-cluster-critical"
host_network = true
container {
name = "defrag-etcd"
image = "registry.k8s.io/etcd:3.5.21-0"
command = ["etcdctl"]
args = ["--endpoints=https://127.0.0.1:2379", "--cacert=/etc/kubernetes/pki/etcd/ca.crt", "--cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt", "--key=/etc/kubernetes/pki/etcd/healthcheck-client.key", "--command-timeout=60s", "defrag"]
env {
name = "ETCDCTL_API"
value = "3"
}
volume_mount {
mount_path = "/etc/kubernetes/pki/etcd"
name = "etcd-certs"
read_only = true
}
}
volume {
name = "etcd-certs"
host_path {
path = "/etc/kubernetes/pki/etcd"
type = "DirectoryOrCreate"
}
}
restart_policy = "Never"
}
}
}
}
}
}
# Clean up evicted/failed pods cluster-wide daily
resource "kubernetes_cron_job_v1" "cleanup-failed-pods" {
metadata {
name = "cleanup-failed-pods"
namespace = "default"
}
spec {
schedule = "0 2 * * *"
successful_jobs_history_limit = 1
failed_jobs_history_limit = 1
concurrency_policy = "Forbid"
job_template {
metadata {
name = "cleanup-failed-pods"
}
spec {
template {
metadata {
name = "cleanup-failed-pods"
}
spec {
service_account_name = kubernetes_service_account.cleanup_sa.metadata[0].name
container {
name = "cleanup"
image = "bitnami/kubectl:latest"
command = ["/bin/sh", "-c", "kubectl delete pods -A --field-selector=status.phase=Failed --ignore-not-found"]
}
restart_policy = "Never"
}
}
}
}
}
}
resource "kubernetes_service_account" "cleanup_sa" {
metadata {
name = "failed-pod-cleanup"
namespace = "default"
}
}
resource "kubernetes_cluster_role" "cleanup_role" {
metadata {
name = "failed-pod-cleanup"
}
rule {
api_groups = [""]
resources = ["pods"]
verbs = ["list", "delete"]
}
}
resource "kubernetes_cluster_role_binding" "cleanup_binding" {
metadata {
name = "failed-pod-cleanup"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = kubernetes_cluster_role.cleanup_role.metadata[0].name
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.cleanup_sa.metadata[0].name
namespace = "default"
}
}

View file

@ -1,148 +0,0 @@
resource "kubernetes_namespace" "iscsi_csi" {
metadata {
name = "iscsi-csi"
labels = {
tier = var.tier
"resource-governance/custom-quota" = "true"
}
}
}
resource "helm_release" "democratic_csi" {
namespace = kubernetes_namespace.iscsi_csi.metadata[0].name
create_namespace = false
name = "democratic-csi-iscsi"
atomic = true
timeout = 300
repository = "https://democratic-csi.github.io/charts/"
chart = "democratic-csi"
values = [yamlencode({
csiDriver = {
name = "org.democratic-csi.iscsi"
}
storageClasses = [{
name = "iscsi-truenas"
defaultClass = false
reclaimPolicy = "Retain"
volumeBindingMode = "Immediate"
allowVolumeExpansion = true
parameters = {
fsType = "ext4"
}
mountOptions = []
}]
controller = {
replicas = 2
driver = {
resources = {
requests = { cpu = "25m", memory = "192Mi" }
limits = { memory = "192Mi" }
}
}
externalProvisioner = {
resources = {
requests = { cpu = "5m", memory = "64Mi" }
limits = { memory = "64Mi" }
}
}
externalAttacher = {
resources = {
requests = { cpu = "5m", memory = "64Mi" }
limits = { memory = "64Mi" }
}
}
externalResizer = {
resources = {
requests = { cpu = "5m", memory = "64Mi" }
limits = { memory = "64Mi" }
}
}
externalSnapshotter = {
resources = {
requests = { cpu = "5m", memory = "80Mi" }
limits = { memory = "80Mi" }
}
}
}
# csiProxy is a top-level chart key, NOT nested under controller/node
csiProxy = {
resources = {
requests = { cpu = "5m", memory = "32Mi" }
limits = { memory = "32Mi" }
}
}
node = {
driver = {
resources = {
requests = { cpu = "25m", memory = "192Mi" }
limits = { memory = "192Mi" }
}
}
driverRegistrar = {
resources = {
requests = { cpu = "5m", memory = "32Mi" }
limits = { memory = "32Mi" }
}
}
cleanup = {
resources = {
requests = { cpu = "5m", memory = "32Mi" }
limits = { memory = "32Mi" }
}
}
hostPID = true
hostPath = "/lib/modules"
}
driver = {
config = {
driver = "freenas-iscsi"
instance_id = "truenas-iscsi"
httpConnection = {
protocol = "http"
host = var.truenas_host
port = 80
apiKey = var.truenas_api_key
}
sshConnection = {
host = var.truenas_host
port = 22
username = "root"
privateKey = var.truenas_ssh_private_key
}
zfs = {
datasetParentName = "main/iscsi"
detachedSnapshotsDatasetParentName = "main/iscsi-snaps"
}
iscsi = {
targetPortal = "${var.truenas_host}:3260"
namePrefix = "csi-"
nameSuffix = ""
targetGroups = [{
targetGroupPortalGroup = 1
targetGroupInitiatorGroup = 1
targetGroupAuthType = "None"
}]
extentInsecureTpc = true
extentXenCompat = false
extentDisablePhysicalBlocksize = true
extentBlocksize = 512
extentRpm = "SSD"
extentAvailThreshold = 0
}
}
}
})]
}

View file

@ -1,10 +0,0 @@
variable "tier" { type = string }
variable "truenas_host" { type = string }
variable "truenas_api_key" {
type = string
sensitive = true
}
variable "truenas_ssh_private_key" {
type = string
sensitive = true
}

View file

@ -1,23 +0,0 @@
node_modules
# Output
.output
.vercel
.netlify
.wrangler
/.svelte-kit
/build
# OS
.DS_Store
Thumbs.db
# Env
.env
.env.*
!.env.example
!.env.test
# Vite
vite.config.js.timestamp-*
vite.config.ts.timestamp-*

View file

@ -1 +0,0 @@
engine-strict=true

View file

@ -1,15 +0,0 @@
FROM node:22-alpine AS build
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build
FROM node:22-alpine
WORKDIR /app
COPY --from=build /app/build ./build
COPY --from=build /app/package.json ./
COPY --from=build /app/node_modules ./node_modules
ENV PORT=3000
EXPOSE 3000
CMD ["node", "build"]

View file

@ -1,42 +0,0 @@
# sv
Everything you need to build a Svelte project, powered by [`sv`](https://github.com/sveltejs/cli).
## Creating a project
If you're seeing this, you've probably already done this step. Congrats!
```sh
# create a new project
npx sv create my-app
```
To recreate this project with the same configuration:
```sh
# recreate this project
npx sv create --template minimal --types ts --install npm .
```
## Developing
Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
```sh
npm run dev
# or start the server and open the app in a new browser tab
npm run dev -- --open
```
## Building
To create a production version of your app:
```sh
npm run build
```
You can preview the production build with `npm run preview`.
> To deploy your app, you may need to install an [adapter](https://svelte.dev/docs/kit/adapters) for your target environment.

File diff suppressed because it is too large.

View file

@ -1,24 +0,0 @@
{
"name": "files",
"private": true,
"version": "0.0.1",
"type": "module",
"scripts": {
"dev": "vite dev",
"build": "vite build",
"preview": "vite preview",
"prepare": "svelte-kit sync || echo ''",
"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
},
"devDependencies": {
"@sveltejs/adapter-auto": "^7.0.0",
"@sveltejs/adapter-node": "^5.5.3",
"@sveltejs/kit": "^2.50.2",
"@sveltejs/vite-plugin-svelte": "^6.2.4",
"svelte": "^5.49.2",
"svelte-check": "^4.3.6",
"typescript": "^5.9.3",
"vite": "^7.3.1"
}
}

View file

@ -1,13 +0,0 @@
// See https://svelte.dev/docs/kit/types#app.d.ts
// for information about these interfaces
declare global {
namespace App {
// interface Error {}
// interface Locals {}
// interface PageData {}
// interface PageState {}
// interface Platform {}
}
}
export {};

View file

@ -1,11 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
%sveltekit.head%
</head>
<body data-sveltekit-preload-data="hover">
<div style="display: contents">%sveltekit.body%</div>
</body>
</html>

View file

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="107" height="128" viewBox="0 0 107 128"><title>svelte-logo</title><path d="M94.157 22.819c-10.4-14.885-30.94-19.297-45.792-9.835L22.282 29.608A29.92 29.92 0 0 0 8.764 49.65a31.5 31.5 0 0 0 3.108 20.231 30 30 0 0 0-4.477 11.183 31.9 31.9 0 0 0 5.448 24.116c10.402 14.887 30.942 19.297 45.791 9.835l26.083-16.624A29.92 29.92 0 0 0 98.235 78.35a31.53 31.53 0 0 0-3.105-20.232 30 30 0 0 0 4.474-11.182 31.88 31.88 0 0 0-5.447-24.116" style="fill:#ff3e00"/><path d="M45.817 106.582a20.72 20.72 0 0 1-22.237-8.243 19.17 19.17 0 0 1-3.277-14.503 18 18 0 0 1 .624-2.435l.49-1.498 1.337.981a33.6 33.6 0 0 0 10.203 5.098l.97.294-.09.968a5.85 5.85 0 0 0 1.052 3.878 6.24 6.24 0 0 0 6.695 2.485 5.8 5.8 0 0 0 1.603-.704L69.27 76.28a5.43 5.43 0 0 0 2.45-3.631 5.8 5.8 0 0 0-.987-4.371 6.24 6.24 0 0 0-6.698-2.487 5.7 5.7 0 0 0-1.6.704l-9.953 6.345a19 19 0 0 1-5.296 2.326 20.72 20.72 0 0 1-22.237-8.243 19.17 19.17 0 0 1-3.277-14.502 17.99 17.99 0 0 1 8.13-12.052l26.081-16.623a19 19 0 0 1 5.3-2.329 20.72 20.72 0 0 1 22.237 8.243 19.17 19.17 0 0 1 3.277 14.503 18 18 0 0 1-.624 2.435l-.49 1.498-1.337-.98a33.6 33.6 0 0 0-10.203-5.1l-.97-.294.09-.968a5.86 5.86 0 0 0-1.052-3.878 6.24 6.24 0 0 0-6.696-2.485 5.8 5.8 0 0 0-1.602.704L37.73 51.72a5.42 5.42 0 0 0-2.449 3.63 5.79 5.79 0 0 0 .986 4.372 6.24 6.24 0 0 0 6.698 2.486 5.8 5.8 0 0 0 1.602-.704l9.952-6.342a19 19 0 0 1 5.295-2.328 20.72 20.72 0 0 1 22.237 8.242 19.17 19.17 0 0 1 3.277 14.503 18 18 0 0 1-8.13 12.053l-26.081 16.622a19 19 0 0 1-5.3 2.328" style="fill:#fff"/></svg>


View file

@ -1 +0,0 @@
// place files you want to import through the `$lib` alias in this folder.

View file

@ -1,64 +0,0 @@
<script lang="ts">
import favicon from '$lib/assets/favicon.svg';
import { page } from '$app/stores';
let { children } = $props();
</script>
<svelte:head>
<link rel="icon" href={favicon} />
</svelte:head>
<nav>
<div class="nav-inner">
<a href="/" class="brand">K8s Portal</a>
<div class="links">
<a href="/onboarding" class:active={$page.url.pathname === '/onboarding'}>Getting Started</a>
<a href="/architecture" class:active={$page.url.pathname === '/architecture'}>Architecture</a>
<a href="/services" class:active={$page.url.pathname === '/services'}>Services</a>
<a href="/contributing" class:active={$page.url.pathname === '/contributing'}>Contributing</a>
<a href="/troubleshooting" class:active={$page.url.pathname === '/troubleshooting'}>Help</a>
</div>
</div>
</nav>
{@render children()}
<style>
nav {
background: #1a1a2e;
padding: 0.75rem 1rem;
position: sticky;
top: 0;
z-index: 100;
}
.nav-inner {
max-width: 768px;
margin: 0 auto;
display: flex;
align-items: center;
gap: 1.5rem;
flex-wrap: wrap;
}
.brand {
color: #e0e0e0;
text-decoration: none;
font-weight: 700;
font-size: 1.1rem;
}
.links {
display: flex;
gap: 1rem;
flex-wrap: wrap;
}
.links a {
color: #a0a0c0;
text-decoration: none;
font-size: 0.9rem;
padding: 0.25rem 0;
}
.links a:hover, .links a.active {
color: #ffffff;
border-bottom: 2px solid #4fc3f7;
}
</style>

View file

@ -1,33 +0,0 @@
import type { PageServerLoad } from './$types';
import { readFileSync } from 'fs';
interface UserRole {
role: string;
namespaces: string[];
}
export const load: PageServerLoad = async ({ request }) => {
const email = request.headers.get('x-authentik-email') || 'unknown';
const username = request.headers.get('x-authentik-username') || 'unknown';
const groups = request.headers.get('x-authentik-groups') || '';
// Read user roles from ConfigMap-mounted file
let userRole: UserRole = { role: 'unknown', namespaces: [] };
try {
const usersJson = readFileSync('/config/users.json', 'utf-8');
const users = JSON.parse(usersJson);
if (users[email]) {
userRole = users[email];
}
} catch {
// ConfigMap not mounted or parse error
}
return {
email,
username,
groups: groups.split('|').filter(Boolean),
role: userRole.role,
namespaces: userRole.namespaces
};
};

View file

@ -1,102 +0,0 @@
<script lang="ts">
let { data } = $props();
</script>
<main>
<h1>Kubernetes Access Portal</h1>
<div class="callout warning">
<strong>VPN Required</strong> — The cluster is on a private network. You need Headscale VPN access before kubectl will work.
<a href="/onboarding">See the Getting Started guide</a> for VPN setup instructions.
</div>
<section>
<h2>Your Identity</h2>
<p><strong>Username:</strong> {data.username}</p>
<p><strong>Email:</strong> {data.email}</p>
<p><strong>Role:</strong> {data.role}</p>
{#if data.namespaces.length > 0}
<p><strong>Namespaces:</strong> {data.namespaces.join(', ')}</p>
{/if}
</section>
{#if data.role === 'namespace-owner'}
<section>
<h2>Your Namespace</h2>
<p><strong>Assigned namespaces:</strong> {data.namespaces.join(', ')}</p>
<h3>Quick Commands</h3>
<pre>
# Check your pods
kubectl get pods -n {data.namespaces[0]}
# View quota usage
kubectl describe resourcequota -n {data.namespaces[0]}
# Log into Vault
vault login -method=oidc
# Store a secret
vault kv put secret/{data.username}/myapp KEY=value
# Get K8s deploy token
vault write kubernetes/creds/{data.namespaces[0]}-deployer \
kubernetes_namespace={data.namespaces[0]}</pre>
</section>
{/if}
<section>
<h2>Get Started</h2>
<ol>
{#if data.role === 'namespace-owner'}
<li><a href="/onboarding?role=namespace-owner">Complete the namespace-owner onboarding guide</a></li>
{:else}
<li><a href="/onboarding">Complete the onboarding guide</a> (VPN, kubectl, git)</li>
{/if}
<li><a href="/setup">Install kubectl and kubelogin</a></li>
<li><a href="/download">Download your kubeconfig</a></li>
<li>Run <code>kubectl get namespaces</code> to verify access</li>
</ol>
</section>
<section>
<h2>Resources</h2>
<ul>
<li><a href="/architecture">Architecture overview</a></li>
<li><a href="/services">Service catalog</a></li>
<li><a href="/contributing">How to contribute</a></li>
<li><a href="/troubleshooting">Troubleshooting</a></li>
</ul>
</section>
</main>
<style>
main {
max-width: 768px;
margin: 2rem auto;
padding: 0 1rem;
font-family: system-ui, -apple-system, sans-serif;
line-height: 1.6;
}
code {
background: #f0f0f0;
padding: 2px 6px;
border-radius: 3px;
}
section {
margin: 2rem 0;
}
.callout {
padding: 1rem;
border-radius: 6px;
margin: 1rem 0;
}
.callout.warning {
background: #fff3cd;
border-left: 4px solid #ffc107;
}
.callout a {
color: #856404;
font-weight: 600;
}
</style>


@ -1,61 +0,0 @@
<main class="content">
<h1>Agent Bootstrap</h1>
<p>Point any AI coding agent at this cluster and it can bootstrap itself automatically.</p>
<section>
<h2>For AI Agents</h2>
<p>Fetch the machine-readable bootstrap document:</p>
<pre>curl -fsSL https://k8s-portal.viktorbarzin.me/agent</pre>
<p>This returns a plain-text markdown document with everything an agent needs: setup commands, critical rules, secrets workflow, Terraform conventions, key file paths, and common operations.</p>
</section>
<section>
<h2>Usage with Claude Code</h2>
<pre>claude "$(curl -fsSL https://k8s-portal.viktorbarzin.me/agent)" "Deploy a new echo service"</pre>
<p>Or within a session:</p>
<ol>
<li>Clone the repo: <code>git clone https://github.com/ViktorBarzin/infra.git && cd infra</code></li>
<li>Start Claude Code: <code>claude</code></li>
<li>Claude auto-reads <code>AGENTS.md</code> and <code>.claude/CLAUDE.md</code> from the repo</li>
</ol>
</section>
<section>
<h2>Usage with Codex / Other Agents</h2>
<ol>
<li>Clone the repo and <code>cd</code> into it</li>
<li>Run the setup script: <code>bash &lt;(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)</code></li>
<li>Start the agent — it will read <code>AGENTS.md</code> for instructions</li>
</ol>
<p>If the agent doesn't auto-read <code>AGENTS.md</code>, feed it the bootstrap doc:</p>
<pre>curl -fsSL https://k8s-portal.viktorbarzin.me/agent</pre>
</section>
<section>
<h2>What the Agent Gets</h2>
<ul>
<li>Quick-start commands (setup script, repo clone)</li>
<li>Critical rules (no kubectl apply, no plaintext secrets, no NFS restart)</li>
<li>Sealed Secrets workflow (kubeseal self-service)</li>
<li>Terraform conventions (fileset pattern, tiers, storage, shared vars)</li>
<li>Key file paths in the repo</li>
<li>Common operations (deploy, fix pods, add secrets)</li>
<li>Contributing workflow (branch, PR, review, CI)</li>
</ul>
</section>
<section>
<h2>The <code>/agent</code> Endpoint</h2>
<p>The endpoint is <strong>unauthenticated</strong> — no login required. Agents can <code>curl</code> or <code>WebFetch</code> it directly without a browser session, just like the setup script.</p>
<p>Content-Type: <code>text/plain</code> — no HTML parsing needed.</p>
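<p>A quick sanity check from the terminal (dumps the response headers so you can confirm the 200 status and the plain-text content type; exact header values may vary):</p>
<pre>curl -fsSL -D - -o /dev/null https://k8s-portal.viktorbarzin.me/agent</pre>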
</section>
</main>
<style>
.content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
.content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
.content h2 { margin-top: 2rem; color: #333; }
.content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
.content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
section { margin: 2rem 0; }
</style>


@ -1,161 +0,0 @@
import type { RequestHandler } from './$types';
const BOOTSTRAP_DOC = `# Infrastructure Cluster — AI Agent Bootstrap
> Fetch this document: \`curl -fsSL https://k8s-portal.viktorbarzin.me/agent\`
## Quick Start
\`\`\`bash
# 1. Install tools (kubectl, kubelogin, kubeseal)
bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)
# 2. Clone the infrastructure repo
git clone https://github.com/ViktorBarzin/infra.git && cd infra
# 3. Verify cluster access (opens browser for OIDC login on first run)
kubectl get namespaces
\`\`\`
## Critical Rules (MUST FOLLOW)
- **ALL changes go through Terraform/Terragrunt** - NEVER \`kubectl apply/edit/patch/delete\` for persistent changes. Read-only kubectl is fine.
- **NEVER put secrets in plaintext** - use Sealed Secrets (\`kubeseal\`) or \`secrets.sops.json\` (SOPS-encrypted).
- **NEVER restart NFS on TrueNAS** - it causes cluster-wide mount failures across all pods.
- **NEVER commit secrets** - triple-check before every commit.
- **Add \`[ci skip]\` to commit messages** when changes were already applied locally.
- **Ask before \`git push\`** — always confirm with the user first.
## Sealed Secrets (Self-Service)
You can manage your own secrets without SOPS access using \`kubeseal\`:
\`\`\`bash
# 1. Create a sealed secret
kubectl create secret generic <name> \\
--from-literal=key=value -n <namespace> \\
--dry-run=client -o yaml | \\
kubeseal --controller-name sealed-secrets \\
--controller-namespace sealed-secrets -o yaml > sealed-<name>.yaml
# 2. Place the file in the stack directory: stacks/<service>/sealed-<name>.yaml
# 3. Ensure the stack's main.tf has the fileset block (add if missing):
\`\`\`
\`\`\`hcl
resource "kubernetes_manifest" "sealed_secrets" {
for_each = fileset(path.module, "sealed-*.yaml")
manifest = yamldecode(file("\${path.module}/\${each.value}"))
}
\`\`\`
\`\`\`bash
# 4. Push to PR -> CI runs terragrunt apply -> controller decrypts into real K8s Secrets
\`\`\`
- Files MUST match the \`sealed-*.yaml\` glob pattern.
- Only the in-cluster controller has the private key. \`kubeseal\` uses the public key — safe to distribute.
- The \`kubernetes_manifest\` block is safe to add even with zero sealed-*.yaml files (empty for_each).
## SOPS Secrets (Admin-Only Fallback)
For secrets requiring admin access (shared infra passwords, API keys):
- **\`secrets.sops.json\`** — SOPS-encrypted secrets (JSON format)
- **Edit**: \`sops secrets.sops.json\` (opens $EDITOR, re-encrypts on save)
- **Add**: \`sops set secrets.sops.json '["new_key"]' '"value"'\`
- **Operators without SOPS keys**: comment on your PR asking Viktor to add the secret.
## Terraform Conventions
### Execution
- **Apply a service**: \`scripts/tg apply --non-interactive\` (auto-decrypts SOPS secrets)
- **Plan**: \`scripts/tg plan --non-interactive\`
- **kubectl**: \`kubectl --kubeconfig $(pwd)/config\`
- **Health check**: \`bash scripts/cluster_healthcheck.sh --quiet\`
### Key Paths
| Path | Purpose |
|------|---------|
| \`stacks/<service>/main.tf\` | Service definition |
| \`stacks/platform/modules/<module>/\` | Core infra modules (~22) |
| \`modules/kubernetes/ingress_factory/\` | Standardized ingress (auth, rate limiting, anti-AI) |
| \`modules/kubernetes/nfs_volume/\` | NFS volume module (CSI-backed, soft mount) |
| \`config.tfvars\` | Non-secret configuration (plaintext) |
| \`secrets.sops.json\` | All secrets (SOPS-encrypted JSON) |
| \`scripts/cluster_healthcheck.sh\` | 25-check cluster health script |
| \`AGENTS.md\` | Full AI agent instructions (auto-loaded by most agents) |
### Tier System
\`0-core\` | \`1-cluster\` | \`2-gpu\` | \`3-edge\` | \`4-aux\`
Kyverno auto-generates LimitRange + ResourceQuota per namespace based on tier label.
- Containers without explicit \`resources {}\` get default limits (256Mi for edge/aux — causes OOMKill for heavy apps)
- Always set explicit resources on containers that need more than defaults
- Opt-out labels: \`resource-governance/custom-quota=true\` / \`resource-governance/custom-limitrange=true\`
### Storage
- **NFS** (\`nfs-truenas\` StorageClass): For app data. Use the \`nfs_volume\` module.
- **iSCSI** (\`iscsi-truenas\` StorageClass): For databases (PostgreSQL, MySQL).
### Shared Variables (never hardcode)
\`var.nfs_server\`, \`var.redis_host\`, \`var.postgresql_host\`, \`var.mysql_host\`, \`var.ollama_host\`, \`var.mail_host\`
## Architecture
- Terragrunt-based homelab managing a Kubernetes cluster (5 nodes, v1.34.2) on Proxmox VMs
- 70+ services, each in \`stacks/<service>/\` with its own Terraform state
- Core platform: \`stacks/platform/modules/\` (Traefik, Kyverno, monitoring, dbaas, sealed-secrets, etc.)
- Public domain: \`viktorbarzin.me\` (Cloudflare) | Internal: \`viktorbarzin.lan\` (Technitium DNS)
- CI/CD: Woodpecker CI - PRs run plan; merges to master auto-apply the platform stack
## Common Operations
### Deploy a New Service
1. Copy an existing stack as template: \`cp -r stacks/echo stacks/my-service\`
2. Edit \`main.tf\` — update image, ports, ingress, resources
3. Add DNS in \`config.tfvars\`
4. Apply platform first if needed, then the service
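A minimal sketch of those steps ("my-service" is an illustrative name):
\`\`\`bash
cp -r stacks/echo stacks/my-service
# edit stacks/my-service/main.tf: image, ports, ingress, resources
# add the DNS record in config.tfvars
cd stacks/my-service && terragrunt apply   # or use scripts/tg per the Execution section above
\`\`\`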
### Fix Crashed Pods
1. Run \`bash scripts/cluster_healthcheck.sh --quiet\`
2. Safe to delete evicted/failed pods and CrashLoopBackOff pods with >10 restarts
3. OOMKilled? Check \`kubectl describe limitrange tier-defaults -n <ns>\` and increase \`resources.limits.memory\`
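A read-only sketch for spotting the problem pods (names are placeholders):
\`\`\`bash
kubectl get pods -A --field-selector=status.phase=Failed
kubectl logs -n <namespace> <pod> --previous --tail=50
kubectl describe pod -n <namespace> <pod> | grep -A5 'Last State'
\`\`\`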
### Add a Secret
- **Self-service**: Use \`kubeseal\` (see Sealed Secrets section above)
- **Admin**: \`sops set secrets.sops.json '["key"]' '"value"'\` then commit
## Contributing Workflow
1. Create a branch: \`git checkout -b fix/my-change\`
2. Make changes in \`stacks/<service>/main.tf\`
3. Push and open a PR: \`git push -u origin fix/my-change\`
4. Viktor reviews and merges
5. CI applies automatically - Slack notification when done
## Infrastructure Details
- **Proxmox**: 192.168.1.127 (Dell R730, 22c/44t, 142GB RAM)
- **Nodes**: k8s-master (10.0.20.100), node1 (GPU, Tesla T4), node2-4
- **GPU workloads**: \`node_selector = { "gpu": "true" }\` + toleration \`nvidia.com/gpu\`
- **Pull-through cache**: 10.0.20.10 - use versioned image tags (the cache can serve stale :latest manifests)
- **MySQL InnoDB Cluster**: 3 instances on iSCSI
- **SMTP**: \`var.mail_host\`, port 587, STARTTLS
## Further Reading
- Full agent instructions: \`AGENTS.md\` in the repo root
- Patterns and examples: \`.claude/reference/patterns.md\`
- Service catalog: \`.claude/reference/service-catalog.md\`
- Onboarding guide: https://k8s-portal.viktorbarzin.me/onboarding
`;
export const GET: RequestHandler = async () => {
return new Response(BOOTSTRAP_DOC, {
headers: {
'Content-Type': 'text/plain; charset=utf-8',
'Cache-Control': 'public, max-age=3600'
}
});
};


@ -1,75 +0,0 @@
<main class="content">
<h1>Architecture</h1>
<section>
<h2>Overview</h2>
<p>The infrastructure runs on a single Dell R730 server (22 CPU cores, 142GB RAM) using Proxmox to manage virtual machines. Five of those VMs form a Kubernetes cluster that runs 70+ services.</p>
<pre class="output">
Proxmox (Dell R730)
├── k8s-master (10.0.20.100) — control plane
├── k8s-node1 (10.0.20.101) — GPU node (Tesla T4)
├── k8s-node2 (10.0.20.102) — worker
├── k8s-node3 (10.0.20.103) — worker
├── k8s-node4 (10.0.20.104) — worker
├── TrueNAS (10.0.10.15) — storage (NFS + iSCSI)
└── pfSense (10.0.20.1) — firewall + gateway</pre>
</section>
<section>
<h2>Networking</h2>
<ul>
<li><strong>Public domain</strong>: <code>viktorbarzin.me</code> — managed by Cloudflare</li>
<li><strong>Internal domain</strong>: <code>viktorbarzin.lan</code> — managed by Technitium DNS</li>
<li><strong>Ingress</strong>: Cloudflare → Traefik → services</li>
<li><strong>VPN</strong>: Headscale (self-hosted Tailscale)</li>
</ul>
</section>
<section>
<h2>Storage</h2>
<ul>
<li><strong>NFS</strong> (<code>nfs-truenas</code>) — for app data (files, configs, media). Stored on TrueNAS.</li>
<li><strong>iSCSI</strong> (<code>iscsi-truenas</code>) — for databases (PostgreSQL, MySQL). Block storage.</li>
</ul>
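<p>If your account has cluster-wide read access, these commands show the storage classes and how volumes bind to them (read-only, safe to run):</p>
<pre>kubectl get storageclass
kubectl get pv -o custom-columns=NAME:.metadata.name,CLASS:.spec.storageClassName,CLAIM:.spec.claimRef.name</pre>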
</section>
<section>
<h2>Service Tiers</h2>
<p>Services are organized into tiers that control resource limits and restart priority:</p>
<table>
<thead><tr><th>Tier</th><th>Examples</th><th>Priority</th></tr></thead>
<tbody>
<tr><td><strong>0-core</strong></td><td>Traefik, DNS, VPN, Auth</td><td>Highest — never evicted</td></tr>
<tr><td><strong>1-cluster</strong></td><td>Redis, Prometheus, CrowdSec</td><td>High</td></tr>
<tr><td><strong>2-gpu</strong></td><td>Ollama, Immich ML, Whisper</td><td>Medium</td></tr>
<tr><td><strong>3-edge</strong></td><td>Nextcloud, Paperless, Grafana</td><td>Normal</td></tr>
<tr><td><strong>4-aux</strong></td><td>Dashy, PrivateBin, CyberChef</td><td>Low — evicted first under pressure</td></tr>
</tbody>
</table>
</section>
<section>
<h2>Infrastructure as Code</h2>
<p>Everything is managed with <strong>Terraform</strong> (via <strong>Terragrunt</strong>). Each service has its own stack:</p>
<pre class="output">stacks/
├── platform/ ← core infra (22 modules)
├── url/ ← URL shortener (Shlink)
├── immich/ ← photo library
├── nextcloud/ ← file storage
└── ... (70+ more)</pre>
<p>Changes go through git: branch → PR → review → merge → CI applies automatically.</p>
</section>
</main>
<style>
.content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
.content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
.content h2 { margin-top: 2rem; color: #333; }
.content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
.content pre.output { background: #f5f5f5; color: #333; }
.content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
section { margin: 2rem 0; }
table { border-collapse: collapse; width: 100%; }
th, td { border: 1px solid #ddd; padding: 0.5rem; text-align: left; }
th { background: #f5f5f5; }
</style>


@ -1,115 +0,0 @@
<main class="content">
<h1>How to Contribute</h1>
<section>
<h2>Workflow</h2>
<ol>
<li><strong>Create a branch</strong>: <code>git checkout -b fix/my-change</code></li>
<li><strong>Make your changes</strong> in <code>stacks/&lt;service&gt;/main.tf</code></li>
<li><strong>Push and open a PR</strong>: <code>git push -u origin fix/my-change</code></li>
<li><strong>Viktor reviews</strong> and merges</li>
<li><strong>CI applies</strong> automatically — Slack notification when done</li>
</ol>
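<p>Put together, a typical change looks like this (the branch name and file are illustrative):</p>
<pre>git checkout -b fix/bump-echo-image
# edit stacks/echo/main.tf
git add stacks/echo/main.tf
git commit -m "echo: bump image tag"
git push -u origin fix/bump-echo-image</pre>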
</section>
<section>
<h2>What you CAN change</h2>
<ul>
<li>Service configurations (image tags, environment variables, resource limits)</li>
<li>New services (add a new stack under <code>stacks/</code>)</li>
<li>Ingress routes, health probes, replica counts</li>
</ul>
</section>
<section>
<h2>What needs Viktor's review</h2>
<ul>
<li>CI pipeline changes (<code>.woodpecker/</code>)</li>
<li>Terragrunt configuration (<code>terragrunt.hcl</code>)</li>
<li>Secrets configuration (<code>.sops.yaml</code>)</li>
<li>Core platform modules (<code>stacks/platform/</code>)</li>
</ul>
</section>
<section>
<h2 class="danger-header">NEVER do these</h2>
<div class="callout danger">
<ul>
<li><strong>Never <code>kubectl apply/edit/patch</code></strong> — all changes go through Terraform</li>
<li><strong>Never put secrets in code</strong> — ask Viktor to add them to the encrypted secrets file</li>
<li><strong>Never restart NFS on TrueNAS</strong> — causes cluster-wide mount failures</li>
<li><strong>Never push directly to master</strong> — always use a PR</li>
</ul>
</div>
</section>
<section>
<h2>Need a new secret?</h2>
<p>Comment on your PR: "I need a database password for my-service." Viktor will add it to the encrypted secrets file and push to your branch.</p>
<p>Then reference it in your Terraform: <code>var.my_service_db_password</code></p>
</section>
<section>
<h2>Namespace Owner Workflow</h2>
<p>If you are a namespace owner, you can deploy your own apps:</p>
<ol>
<li>Clone the infra repo: <code>git clone https://github.com/ViktorBarzin/infra.git</code></li>
<li>Copy the template: <code>cp -r stacks/_template stacks/your-app</code></li>
<li>Rename: <code>mv stacks/your-app/main.tf.example stacks/your-app/main.tf</code></li>
<li>Edit <code>main.tf</code> — replace all <code>&lt;placeholders&gt;</code></li>
<li>Store secrets in Vault: <code>vault kv put secret/your-username/your-app KEY=value</code></li>
<li>Add your app domain to your <code>domains</code> list in Vault KV</li>
<li>Submit a PR, get it reviewed</li>
<li>After merge, admin runs <code>terragrunt apply</code></li>
</ol>
</section>
<section>
<h2>CI Pipeline Template</h2>
<p>Create a <code>.woodpecker.yml</code> in your app's Forgejo repo:</p>
<pre>{`steps:
- name: build
image: woodpeckerci/plugin-docker-buildx
settings:
repo: your-dockerhub-user/myapp
tag: ["\${CI_PIPELINE_NUMBER}", "latest"]
username:
from_secret: dockerhub-username
password:
from_secret: dockerhub-token
platforms: linux/amd64
- name: deploy
image: hashicorp/vault:1.18.1
commands:
- export VAULT_ADDR=http://vault-active.vault.svc.cluster.local:8200
- export VAULT_TOKEN=$(vault write -field=token auth/kubernetes/login
role=ci jwt=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token))
- KUBE_TOKEN=$(vault write -field=service_account_token
kubernetes/creds/YOUR_NAMESPACE-deployer
kubernetes_namespace=YOUR_NAMESPACE)
- kubectl --server=https://kubernetes.default.svc
--token=$KUBE_TOKEN
--certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-n YOUR_NAMESPACE set image deployment/myapp
myapp=your-dockerhub-user/myapp:\${CI_PIPELINE_NUMBER}`}</pre>
</section>
<section>
<h2>Need a secret for your app?</h2>
<p>As a namespace owner, you manage your own secrets in Vault:</p>
<pre>vault kv put secret/your-username/your-app DB_PASSWORD=mysecret API_KEY=abc123</pre>
<p>Then reference them in your Terraform using a <code>data "vault_kv_secret_v2"</code> block.</p>
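<p>You can read a secret back to confirm it was stored (same path as above):</p>
<pre>vault kv get -format=json secret/your-username/your-app</pre>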
</section>
</main>
<style>
.content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
.content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
.content h2 { margin-top: 2rem; color: #333; }
.content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
section { margin: 2rem 0; }
.callout { padding: 1rem; border-radius: 6px; margin: 1rem 0; }
.callout.danger { background: #f8d7da; border-left: 4px solid #dc3545; }
.danger-header { color: #dc3545; }
</style>


@ -1,58 +0,0 @@
import type { RequestHandler } from './$types';
import { readFileSync } from 'fs';
const CLUSTER_SERVER = 'https://10.0.20.100:6443';
const OIDC_ISSUER = 'https://authentik.viktorbarzin.me/application/o/kubernetes/';
const OIDC_CLIENT_ID = 'kubernetes';
export const GET: RequestHandler = async ({ request }) => {
const email = request.headers.get('x-authentik-email') || 'user';
// Read CA cert from mounted ConfigMap
let caCert = '';
try {
caCert = readFileSync('/config/ca.crt', 'utf-8');
} catch {
// CA cert not available
}
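// Note: if the CA cert isn't mounted, caCert stays empty and the generated kubeconfig
// carries an empty certificate-authority-data, so kubectl will likely fail TLS verification.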
const caCertBase64 = Buffer.from(caCert).toString('base64');
const sanitizedEmail = email.replace(/[^a-zA-Z0-9@._-]/g, '');
const kubeconfig = `apiVersion: v1
kind: Config
clusters:
- cluster:
server: ${CLUSTER_SERVER}
certificate-authority-data: ${caCertBase64}
name: home-cluster
contexts:
- context:
cluster: home-cluster
user: oidc-${sanitizedEmail}
name: home-cluster
current-context: home-cluster
users:
- name: oidc-${sanitizedEmail}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: kubectl
args:
- oidc-login
- get-token
- --oidc-issuer-url=${OIDC_ISSUER}
- --oidc-client-id=${OIDC_CLIENT_ID}
- --oidc-extra-scope=email
- --oidc-extra-scope=profile
- --oidc-extra-scope=groups
interactiveMode: IfAvailable
`;
return new Response(kubeconfig, {
headers: {
'Content-Type': 'application/yaml',
'Content-Disposition': `attachment; filename="kubeconfig-home-cluster.yaml"`
}
});
};


@ -1,146 +0,0 @@
<script>
import { page } from '$app/stores';
let showNamespaceOwner = $derived($page.url.searchParams.get('role') === 'namespace-owner');
</script>
<main class="content">
<h1>Getting Started</h1>
<p>Welcome! Follow these steps to get access to the home Kubernetes cluster.</p>
<div class="role-tabs">
<a href="/onboarding" class:active={!showNamespaceOwner}>General User</a>
<a href="/onboarding?role=namespace-owner" class:active={showNamespaceOwner}>Namespace Owner</a>
</div>
<section>
<h2>Step 0 — Join the VPN</h2>
<p>The cluster is on a private network (<code>10.0.20.0/24</code>). You need VPN access first.</p>
<ol>
<li>Install <a href="https://tailscale.com/download" target="_blank">Tailscale</a> for your OS</li>
<li>Run this in your terminal:
<pre>tailscale login --login-server https://headscale.viktorbarzin.me</pre>
</li>
<li>A browser window will open with a registration URL</li>
<li>Send that URL to Viktor via email (<a href="mailto:vbarzin@gmail.com">vbarzin@gmail.com</a>) or Slack</li>
<li>Wait for approval (usually within a few hours)</li>
<li>Once approved, test: <pre>ping 10.0.20.100</pre></li>
</ol>
</section>
<section>
<h2>Step 1 — Log in to the portal</h2>
<p>Visit <a href="https://k8s-portal.viktorbarzin.me">k8s-portal.viktorbarzin.me</a> and sign in with your Authentik account.</p>
<p>If you don't have an account yet, ask Viktor to create one.</p>
</section>
<section>
<h2>Step 2 — Set up kubectl</h2>
<p>Run one of these commands in your terminal to install everything automatically:</p>
<h3>macOS</h3>
<p class="prereq">Requires <a href="https://brew.sh" target="_blank">Homebrew</a>. Install it first if you don't have it.</p>
<pre>bash &lt;(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=mac)</pre>
<h3>Linux</h3>
<pre>bash &lt;(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)</pre>
<h3>Windows</h3>
<p>Use <a href="https://learn.microsoft.com/en-us/windows/wsl/install" target="_blank">WSL2</a> and follow the Linux instructions.</p>
</section>
{#if showNamespaceOwner}
<section>
<h2>Step 3 — Log into Vault</h2>
<p>Vault manages your secrets and issues dynamic Kubernetes credentials.</p>
<pre>vault login -method=oidc</pre>
<p>This opens your browser for Authentik SSO. After login, your token is saved to <code>~/.vault-token</code>.</p>
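<p>To confirm the login worked:</p>
<pre>vault token lookup</pre>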
</section>
<section>
<h2>Step 4 — Verify kubectl access</h2>
<p>Run this command. It will open your browser for OIDC login the first time:</p>
<pre>kubectl get pods -n YOUR_NAMESPACE</pre>
<p>You should see an empty list (no resources) or your running pods.</p>
</section>
<section>
<h2>Step 5 — Clone the infra repo</h2>
<pre>git clone https://github.com/ViktorBarzin/infra.git
cd infra</pre>
<p>This is where all the infrastructure configuration lives.</p>
</section>
<section>
<h2>Step 6 — Create your first app stack</h2>
<ol>
<li>Copy the template: <pre>cp -r stacks/_template stacks/myapp
mv stacks/myapp/main.tf.example stacks/myapp/main.tf</pre></li>
<li>Edit <code>stacks/myapp/main.tf</code> — replace all <code>&lt;placeholders&gt;</code></li>
<li>Store secrets in Vault:
<pre>vault kv put secret/YOUR_USERNAME/myapp DB_PASSWORD=secret123</pre>
</li>
<li>Add your app domain to <code>domains</code> list in Vault KV <code>k8s_users</code></li>
<li>Submit a PR:
<pre>git checkout -b feat/myapp
git add stacks/myapp/
git commit -m "add myapp stack"
git push -u origin feat/myapp</pre>
</li>
<li>Viktor reviews and merges</li>
<li>After merge: <code>cd stacks/myapp && terragrunt apply</code></li>
</ol>
</section>
{:else}
<section>
<h2>Step 3 — Verify access</h2>
<p>Run this command. It will open your browser for login the first time:</p>
<pre>kubectl get namespaces</pre>
<p>You should see output like:</p>
<pre class="output">NAME STATUS AGE
default Active 200d
kube-system Active 200d
monitoring Active 200d
...</pre>
<p>If you get a connection error, make sure your VPN is connected (<code>tailscale status</code>).</p>
</section>
<section>
<h2>Step 4 — Clone the repo</h2>
<pre>git clone https://github.com/ViktorBarzin/infra.git
cd infra</pre>
<p>This is where all the infrastructure configuration lives.</p>
</section>
<section>
<h2>Step 5 — Install your AI assistant (optional)</h2>
<p>Install <a href="https://github.com/openai/codex" target="_blank">Codex CLI</a> for AI-assisted cluster management:</p>
<pre>npm install -g @openai/codex</pre>
<p>Codex reads the <code>AGENTS.md</code> file in the repo and knows how to work with the cluster.</p>
</section>
<section>
<h2>Step 6 — Your first change</h2>
<ol>
<li>Create a branch: <pre>git checkout -b my-first-change</pre></li>
<li>Edit a service file (e.g., change an image tag in <code>stacks/echo/main.tf</code>)</li>
<li>Commit and push: <pre>git add . && git commit -m "my first change" && git push -u origin my-first-change</pre></li>
<li>Open a Pull Request on GitHub</li>
<li>Viktor reviews and merges</li>
<li>Woodpecker CI automatically applies the change to the cluster</li>
<li>Slack notification confirms it worked</li>
</ol>
</section>
{/if}
</main>
<style>
.content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
.content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
.content h2 { margin-top: 2rem; color: #333; }
.content h3 { color: #666; margin: 1rem 0 0.25rem; }
.content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
.content pre.output { background: #f5f5f5; color: #333; }
.content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
.content .prereq { font-size: 0.9rem; color: #666; font-style: italic; }
section { margin: 2rem 0; }
.role-tabs { display: flex; gap: 0; margin: 1.5rem 0; border-bottom: 2px solid #e0e0e0; }
.role-tabs a { padding: 0.5rem 1.5rem; text-decoration: none; color: #666; border-bottom: 2px solid transparent; margin-bottom: -2px; }
.role-tabs a.active { color: #333; border-bottom-color: #333; font-weight: 600; }
</style>


@ -1,58 +0,0 @@
<main class="content">
<h1>Service Catalog</h1>
<p>70+ services running on the cluster. Here are the most commonly used:</p>
<section>
<h2>Core Services</h2>
<table>
<thead><tr><th>Service</th><th>URL</th><th>Description</th></tr></thead>
<tbody>
<tr><td>Grafana</td><td><a href="https://grafana.viktorbarzin.me">grafana.viktorbarzin.me</a></td><td>Monitoring dashboards</td></tr>
<tr><td>Uptime Kuma</td><td><a href="https://uptime.viktorbarzin.me">uptime.viktorbarzin.me</a></td><td>Service uptime monitoring</td></tr>
<tr><td>Authentik</td><td><a href="https://authentik.viktorbarzin.me">authentik.viktorbarzin.me</a></td><td>Identity provider (SSO)</td></tr>
<tr><td>Woodpecker CI</td><td><a href="https://ci.viktorbarzin.me">ci.viktorbarzin.me</a></td><td>CI/CD pipeline</td></tr>
</tbody>
</table>
</section>
<section>
<h2>User-Facing Services</h2>
<table>
<thead><tr><th>Service</th><th>URL</th><th>Description</th></tr></thead>
<tbody>
<tr><td>Nextcloud</td><td><a href="https://nextcloud.viktorbarzin.me">nextcloud.viktorbarzin.me</a></td><td>File storage, calendar, contacts</td></tr>
<tr><td>Immich</td><td><a href="https://immich.viktorbarzin.me">immich.viktorbarzin.me</a></td><td>Photo library (Google Photos alternative)</td></tr>
<tr><td>Vaultwarden</td><td><a href="https://vault.viktorbarzin.me">vault.viktorbarzin.me</a></td><td>Password manager</td></tr>
<tr><td>Paperless-ngx</td><td><a href="https://pdf.viktorbarzin.me">pdf.viktorbarzin.me</a></td><td>Document management</td></tr>
<tr><td>Navidrome</td><td><a href="https://music.viktorbarzin.me">music.viktorbarzin.me</a></td><td>Music streaming</td></tr>
<tr><td>Tandoor</td><td><a href="https://recipes.viktorbarzin.me">recipes.viktorbarzin.me</a></td><td>Recipe manager</td></tr>
<tr><td>Linkwarden</td><td><a href="https://bookmarks.viktorbarzin.me">bookmarks.viktorbarzin.me</a></td><td>Bookmark manager</td></tr>
</tbody>
</table>
</section>
<section>
<h2>Developer Tools</h2>
<table>
<thead><tr><th>Service</th><th>URL</th><th>Description</th></tr></thead>
<tbody>
<tr><td>Forgejo</td><td><a href="https://forgejo.viktorbarzin.me">forgejo.viktorbarzin.me</a></td><td>Git server (Gitea fork)</td></tr>
<tr><td>CyberChef</td><td><a href="https://cyberchef.viktorbarzin.me">cyberchef.viktorbarzin.me</a></td><td>Data transformation tool</td></tr>
<tr><td>Excalidraw</td><td><a href="https://draw.viktorbarzin.me">draw.viktorbarzin.me</a></td><td>Whiteboard drawing</td></tr>
<tr><td>PrivateBin</td><td><a href="https://paste.viktorbarzin.me">paste.viktorbarzin.me</a></td><td>Encrypted paste bin</td></tr>
<tr><td>JSON Crack</td><td><a href="https://jsoncrack.viktorbarzin.me">jsoncrack.viktorbarzin.me</a></td><td>JSON visualizer</td></tr>
</tbody>
</table>
</section>
</main>
<style>
.content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
.content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
.content h2 { margin-top: 2rem; color: #333; }
section { margin: 2rem 0; }
table { border-collapse: collapse; width: 100%; }
th, td { border: 1px solid #ddd; padding: 0.5rem; text-align: left; }
th { background: #f5f5f5; }
a { color: #1a73e8; }
</style>


@ -1,69 +0,0 @@
<main>
<h1>Setup Instructions</h1>
<section>
<h2>Quick Setup (one command)</h2>
<p>Run this in your terminal to install everything and configure kubectl automatically:</p>
<h3>macOS</h3>
<pre>bash &lt;(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=mac)</pre>
<h3>Linux</h3>
<pre>bash &lt;(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)</pre>
</section>
<section>
<h2>Manual Setup</h2>
<h3>1. Install kubectl</h3>
<h4>macOS</h4>
<pre>brew install kubectl</pre>
<h4>Linux</h4>
<pre>curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x kubectl && sudo mv kubectl /usr/local/bin/</pre>
<h3>2. Install kubelogin (OIDC plugin)</h3>
<h4>macOS</h4>
<pre>brew install int128/kubelogin/kubelogin</pre>
<h4>Linux</h4>
<pre>curl -LO https://github.com/int128/kubelogin/releases/latest/download/kubelogin_linux_amd64.zip
unzip kubelogin_linux_amd64.zip && sudo mv kubelogin /usr/local/bin/kubectl-oidc_login
rm kubelogin_linux_amd64.zip</pre>
<h3>3. Download and use your kubeconfig</h3>
<pre>
mkdir -p ~/.kube
# Download from the portal (requires auth cookie from browser)
# Or use the download button on the portal homepage
# Set the KUBECONFIG environment variable
export KUBECONFIG=~/.kube/config-home
# Test access (opens browser for login)
kubectl get namespaces
</pre>
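<p>Alternatively, fetch it from the terminal. The cookie value below is a placeholder; copy your real session cookie from the browser's developer tools:</p>
<pre>curl -fsSL -H "Cookie: $PORTAL_SESSION_COOKIE" \
  https://k8s-portal.viktorbarzin.me/download -o ~/.kube/config-home</pre>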
</section>
<p><a href="/">&#8592; Back to portal</a></p>
</main>
<style>
main {
max-width: 640px;
margin: 2rem auto;
font-family: system-ui;
}
pre {
background: #1e1e1e;
color: #d4d4d4;
padding: 1rem;
border-radius: 6px;
overflow-x: auto;
}
section {
margin: 2rem 0;
}
h4 {
margin: 0.5rem 0 0.25rem;
color: #666;
}
</style>


@ -1,266 +0,0 @@
import type { RequestHandler } from './$types';
import { readFileSync } from 'fs';
const CLUSTER_SERVER = 'https://10.0.20.100:6443';
const OIDC_ISSUER = 'https://authentik.viktorbarzin.me/application/o/kubernetes/';
const OIDC_CLIENT_ID = 'kubernetes';
export const GET: RequestHandler = async ({ url }) => {
const os = url.searchParams.get('os') || 'mac';
let caCert = '';
try {
caCert = readFileSync('/config/ca.crt', 'utf-8');
} catch {
// CA cert not available
}
const caCertBase64 = Buffer.from(caCert).toString('base64');
const kubeconfigContent = `apiVersion: v1
kind: Config
clusters:
- cluster:
server: ${CLUSTER_SERVER}
certificate-authority-data: ${caCertBase64}
name: home-cluster
contexts:
- context:
cluster: home-cluster
user: oidc-user
name: home-cluster
current-context: home-cluster
users:
- name: oidc-user
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: kubectl
args:
- oidc-login
- get-token
- --oidc-issuer-url=${OIDC_ISSUER}
- --oidc-client-id=${OIDC_CLIENT_ID}
- --oidc-extra-scope=email
- --oidc-extra-scope=profile
- --oidc-extra-scope=groups
interactiveMode: IfAvailable`;
let script: string;
if (os === 'linux') {
script = `#!/bin/bash
set -e
echo "=== Kubernetes Cluster Setup ==="
echo ""
# Use sudo if available, otherwise install directly (e.g. in containers running as root)
SUDO=""
if [ "$(id -u)" -ne 0 ] && command -v sudo &>/dev/null; then
SUDO="sudo"
fi
# Determine install directory
INSTALL_DIR="/usr/local/bin"
if [ ! -w "\$INSTALL_DIR" ] && [ -z "\$SUDO" ]; then
INSTALL_DIR="\$HOME/.local/bin"
mkdir -p "\$INSTALL_DIR"
export PATH="\$INSTALL_DIR:\$PATH"
fi
# Install kubectl
if command -v kubectl &>/dev/null; then
echo "[OK] kubectl already installed"
else
echo "[..] Installing kubectl..."
KUBECTL_VERSION=\$(curl -L -s https://dl.k8s.io/release/stable.txt)
curl -fsSLO "https://dl.k8s.io/release/\${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
chmod +x kubectl && \$SUDO mv kubectl "\$INSTALL_DIR/"
echo "[OK] kubectl installed"
fi
# Install kubelogin
if command -v kubectl-oidc_login &>/dev/null; then
echo "[OK] kubelogin already installed"
else
echo "[..] Installing kubelogin..."
KUBELOGIN_VERSION=\$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/int128/kubelogin/releases/latest | grep -o '[^/]*\$')
curl -fsSLO "https://github.com/int128/kubelogin/releases/download/\${KUBELOGIN_VERSION}/kubelogin_linux_amd64.zip"
unzip -o kubelogin_linux_amd64.zip kubelogin -d /tmp
\$SUDO mv /tmp/kubelogin "\$INSTALL_DIR/kubectl-oidc_login"
rm -f kubelogin_linux_amd64.zip
echo "[OK] kubelogin installed"
fi
# Install kubeseal
if command -v kubeseal &>/dev/null; then
echo "[OK] kubeseal already installed"
else
echo "[..] Installing kubeseal..."
KUBESEAL_VERSION=\$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/bitnami-labs/sealed-secrets/releases/latest | grep -o '[^/]*\$')
curl -fsSLO "https://github.com/bitnami-labs/sealed-secrets/releases/download/\${KUBESEAL_VERSION}/kubeseal-\${KUBESEAL_VERSION#v}-linux-amd64.tar.gz"
tar -xzf "kubeseal-\${KUBESEAL_VERSION#v}-linux-amd64.tar.gz" kubeseal
\$SUDO mv kubeseal "\$INSTALL_DIR/"
rm -f "kubeseal-\${KUBESEAL_VERSION#v}-linux-amd64.tar.gz"
echo "[OK] kubeseal installed"
fi
# Install Vault CLI
if command -v vault &>/dev/null; then
echo "[OK] vault already installed"
else
echo "[..] Installing Vault CLI..."
VAULT_VERSION="1.18.1"
curl -fsSLO "https://releases.hashicorp.com/vault/\${VAULT_VERSION}/vault_\${VAULT_VERSION}_linux_amd64.zip"
unzip -o "vault_\${VAULT_VERSION}_linux_amd64.zip" vault -d /tmp
\$SUDO mv /tmp/vault "\$INSTALL_DIR/"
rm -f "vault_\${VAULT_VERSION}_linux_amd64.zip"
echo "[OK] vault installed"
fi
# Install Terragrunt
if command -v terragrunt &>/dev/null; then
echo "[OK] terragrunt already installed"
else
echo "[..] Installing terragrunt..."
TG_VERSION=\$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/gruntwork-io/terragrunt/releases/latest | grep -o '[^/]*\$')
curl -fsSLO "https://github.com/gruntwork-io/terragrunt/releases/download/\${TG_VERSION}/terragrunt_linux_amd64"
chmod +x terragrunt_linux_amd64
\$SUDO mv terragrunt_linux_amd64 "\$INSTALL_DIR/terragrunt"
echo "[OK] terragrunt installed"
fi
# Install Terraform
if command -v terraform &>/dev/null; then
echo "[OK] terraform already installed"
else
echo "[..] Installing terraform..."
TF_VERSION="1.9.8"
curl -fsSLO "https://releases.hashicorp.com/terraform/\${TF_VERSION}/terraform_\${TF_VERSION}_linux_amd64.zip"
unzip -o "terraform_\${TF_VERSION}_linux_amd64.zip" terraform -d /tmp
\$SUDO mv /tmp/terraform "\$INSTALL_DIR/"
rm -f "terraform_\${TF_VERSION}_linux_amd64.zip"
echo "[OK] terraform installed"
fi
# Write kubeconfig
mkdir -p ~/.kube
cat > ~/.kube/config-home << 'KUBECONFIG_EOF'
${kubeconfigContent}
KUBECONFIG_EOF
echo "[OK] Kubeconfig written to ~/.kube/config-home"
# Add KUBECONFIG to shell profile
SHELL_RC=~/.bashrc
[ -f ~/.zshrc ] && SHELL_RC=~/.zshrc
if ! grep -q 'config-home' "\$SHELL_RC" 2>/dev/null; then
echo 'export KUBECONFIG=~/.kube/config-home' >> "\$SHELL_RC"
echo "[OK] Added KUBECONFIG to \$SHELL_RC"
fi
export KUBECONFIG=~/.kube/config-home
echo ""
echo "=== Setup complete! ==="
echo ""
echo "Run 'kubectl get namespaces' to test (opens browser for login)."
echo "You may need to restart your shell or run: export KUBECONFIG=~/.kube/config-home"
`;
} else {
script = `#!/bin/bash
set -e
echo "=== Kubernetes Cluster Setup ==="
echo ""
# Check for Homebrew
if ! command -v brew &>/dev/null; then
echo "[!!] Homebrew not found. Install it first:"
echo ' /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"'
exit 1
fi
# Install kubectl
if command -v kubectl &>/dev/null; then
echo "[OK] kubectl already installed ($(kubectl version --client -o json 2>/dev/null | grep -o '"gitVersion":"[^"]*"' | cut -d'"' -f4))"
else
echo "[..] Installing kubectl..."
brew install kubectl
echo "[OK] kubectl installed"
fi
# Install kubelogin
if command -v kubectl-oidc_login &>/dev/null; then
echo "[OK] kubelogin already installed"
else
echo "[..] Installing kubelogin..."
brew install int128/kubelogin/kubelogin
echo "[OK] kubelogin installed"
fi
# Install kubeseal
if command -v kubeseal &>/dev/null; then
echo "[OK] kubeseal already installed"
else
echo "[..] Installing kubeseal..."
brew install kubeseal
echo "[OK] kubeseal installed"
fi
# Install Vault CLI
if command -v vault &>/dev/null; then
echo "[OK] vault already installed"
else
echo "[..] Installing Vault CLI..."
brew tap hashicorp/tap
brew install hashicorp/tap/vault
echo "[OK] vault installed"
fi
# Install Terragrunt
if command -v terragrunt &>/dev/null; then
echo "[OK] terragrunt already installed"
else
echo "[..] Installing terragrunt..."
brew install terragrunt
echo "[OK] terragrunt installed"
fi
# Install Terraform
if command -v terraform &>/dev/null; then
echo "[OK] terraform already installed"
else
echo "[..] Installing terraform..."
brew install hashicorp/tap/terraform
echo "[OK] terraform installed"
fi
# Write kubeconfig
mkdir -p ~/.kube
cat > ~/.kube/config-home << 'KUBECONFIG_EOF'
${kubeconfigContent}
KUBECONFIG_EOF
echo "[OK] Kubeconfig written to ~/.kube/config-home"
# Add KUBECONFIG to shell profile
SHELL_RC=~/.zshrc
[ ! -f ~/.zshrc ] && SHELL_RC=~/.bashrc
if ! grep -q 'config-home' "\$SHELL_RC" 2>/dev/null; then
echo 'export KUBECONFIG=~/.kube/config-home' >> "\$SHELL_RC"
echo "[OK] Added KUBECONFIG to \$SHELL_RC"
fi
export KUBECONFIG=~/.kube/config-home
echo ""
echo "=== Setup complete! ==="
echo ""
echo "Run 'kubectl get namespaces' to test (opens browser for login)."
echo "You may need to restart your shell or run: export KUBECONFIG=~/.kube/config-home"
`;
}
return new Response(script, {
headers: {
'Content-Type': 'text/plain; charset=utf-8'
}
});
};


@ -1,63 +0,0 @@
<main class="content">
<h1>Troubleshooting</h1>
<section>
<h2>"kubectl can't connect to the server"</h2>
<ol>
<li>Check your VPN: <code>tailscale status</code> — should show "connected"</li>
<li>Check KUBECONFIG: <code>echo $KUBECONFIG</code> — should be <code>~/.kube/config-home</code></li>
<li>Test connectivity: <code>ping 10.0.20.100</code></li>
<li>If ping works but kubectl doesn't, re-run the <a href="/setup">setup script</a></li>
</ol>
</section>
<section>
<h2>"Forbidden" or "Permission denied"</h2>
<p>You may not have access to that namespace. Your access is scoped to specific namespaces.</p>
<p>Try: <code>kubectl get namespaces</code> to see which namespaces you can access.</p>
<p>Need access to another namespace? Ask Viktor.</p>
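<p>To see exactly what you are allowed to do in a namespace:</p>
<pre>kubectl auth can-i --list -n &lt;namespace&gt;</pre>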
</section>
<section>
<h2>"Pod is CrashLoopBackOff"</h2>
<ol>
<li>Check pod logs: <code>kubectl logs -n &lt;namespace&gt; &lt;pod-name&gt; --tail=50</code></li>
<li>Check previous crash: <code>kubectl logs -n &lt;namespace&gt; &lt;pod-name&gt; --previous</code></li>
<li>Check events: <code>kubectl describe pod -n &lt;namespace&gt; &lt;pod-name&gt;</code></li>
<li>Common causes: OOMKilled (need more memory), bad config, database connection failure</li>
</ol>
</section>
<section>
<h2>"PR CI failed"</h2>
<ol>
<li>Check the Woodpecker CI dashboard: <a href="https://ci.viktorbarzin.me">ci.viktorbarzin.me</a></li>
<li>Read the build logs — the error is usually at the bottom</li>
<li>Fix the issue, commit, and push — CI will re-run</li>
</ol>
</section>
<section>
<h2>"I need a new secret / database password"</h2>
<p>Secrets are managed by Viktor in an encrypted file. You cannot add them yourself.</p>
<ol>
<li>Comment on your PR: "Need DB password for &lt;service&gt;"</li>
<li>Viktor adds the secret and pushes to your branch</li>
<li>Reference it as <code>var.&lt;service&gt;_db_password</code> in your Terraform</li>
</ol>
</section>
<section>
<h2>Still stuck?</h2>
<p>Email Viktor at <a href="mailto:vbarzin@gmail.com">vbarzin@gmail.com</a> or message on Slack.</p>
</section>
</main>
<style>
.content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
.content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
.content h2 { margin-top: 2rem; color: #333; }
.content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
.content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
section { margin: 2rem 0; }
</style>


@ -1,3 +0,0 @@
# allow crawling everything by default
User-agent: *
Disallow:


@ -1,10 +0,0 @@
import adapter from '@sveltejs/adapter-node';
/** @type {import('@sveltejs/kit').Config} */
const config = {
kit: {
adapter: adapter()
}
};
export default config;


@ -1,20 +0,0 @@
{
"extends": "./.svelte-kit/tsconfig.json",
"compilerOptions": {
"rewriteRelativeImportExtensions": true,
"allowJs": true,
"checkJs": true,
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"skipLibCheck": true,
"sourceMap": true,
"strict": true,
"moduleResolution": "bundler"
}
// Path aliases are handled by https://svelte.dev/docs/kit/configuration#alias
// except $lib which is handled by https://svelte.dev/docs/kit/configuration#files
//
// To make changes to top-level options such as include and exclude, we recommend extending
// the generated config; see https://svelte.dev/docs/kit/configuration#typescript
}


@ -1,6 +0,0 @@
import { sveltekit } from '@sveltejs/kit/vite';
import { defineConfig } from 'vite';
export default defineConfig({
plugins: [sveltekit()]
});


@ -1,166 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "k8s_ca_cert" {
type = string
default = ""
}
resource "kubernetes_namespace" "k8s_portal" {
metadata {
name = "k8s-portal"
labels = {
tier = var.tier
}
}
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_config_map" "k8s_portal_config" {
metadata {
name = "k8s-portal-config"
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
}
data = {
"ca.crt" = var.k8s_ca_cert
}
}
resource "kubernetes_deployment" "k8s_portal" {
metadata {
name = "k8s-portal"
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
labels = {
app = "k8s-portal"
tier = var.tier
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
revision_history_limit = 3
selector {
match_labels = {
app = "k8s-portal"
}
}
template {
metadata {
labels = {
app = "k8s-portal"
}
}
spec {
container {
name = "portal"
image = "viktorbarzin/k8s-portal:latest"
port {
container_port = 3000
}
volume_mount {
name = "config"
mount_path = "/config/ca.crt"
sub_path = "ca.crt"
read_only = true
}
volume_mount {
name = "user-roles"
mount_path = "/config/users.json"
sub_path = "users.json"
read_only = true
}
resources {
requests = {
cpu = "10m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
}
}
}
volume {
name = "config"
config_map {
name = kubernetes_config_map.k8s_portal_config.metadata[0].name
}
}
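# The "k8s-user-roles" ConfigMap is not defined in this module; it is expected
# to already exist in the namespace (managed elsewhere).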
volume {
name = "user-roles"
config_map {
name = "k8s-user-roles"
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
lifecycle {
ignore_changes = [
spec[0].template[0].spec[0].dns_config,
spec[0].template[0].spec[0].container[0].image, # CI updates image tag
]
}
}
resource "kubernetes_service" "k8s_portal" {
metadata {
name = "k8s-portal"
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
}
spec {
selector = {
app = "k8s-portal"
}
port {
port = 80
target_port = 3000
}
}
}
module "ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
name = "k8s-portal"
tls_secret_name = var.tls_secret_name
protected = true # Require Authentik login
extra_annotations = {
"gethomepage.dev/enabled" = "true"
"gethomepage.dev/name" = "K8s Portal"
"gethomepage.dev/description" = "Kubernetes portal"
"gethomepage.dev/icon" = "kubernetes.png"
"gethomepage.dev/group" = "Core Platform"
"gethomepage.dev/pod-selector" = ""
}
}
# Unprotected ingress for the setup script and agent endpoint (needs to be curl-able without auth)
module "ingress_setup_script" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
name = "k8s-portal-setup"
host = "k8s-portal"
service_name = "k8s-portal"
ingress_path = ["/setup/script", "/agent"]
tls_secret_name = var.tls_secret_name
protected = false
}


@ -1,72 +0,0 @@
# =============================================================================
# Pod Dependency Init Container Injection
# =============================================================================
# Reads the annotation dependency.kyverno.io/wait-for from pods and injects
# init containers that wait for each listed dependency to be reachable.
#
# Usage:
# annotations:
# dependency.kyverno.io/wait-for: "postgresql.dbaas:5432,redis.redis:6379"
#
# Each comma-separated entry becomes a busybox init container that runs
# `nc -z <host> <port>` in a loop until the dependency is reachable.
# Existing init containers are preserved; Kyverno appends to the array.
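# For example, the annotation value "postgresql.dbaas:5432" produces an init
# container roughly equivalent to:
#   name:    wait-for-postgresql-dbaas
#   image:   busybox:1.37
#   command: sh -c "until nc -z postgresql.dbaas 5432; do echo waiting for postgresql.dbaas:5432; sleep 2; done"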
resource "kubernetes_manifest" "inject_dependency_init_containers" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "inject-dependency-init-containers"
annotations = {
"policies.kyverno.io/title" = "Inject Dependency Init Containers"
"policies.kyverno.io/description" = "Injects wait-for init containers based on dependency.kyverno.io/wait-for pod annotation. Each comma-separated host:port entry becomes a busybox init container that blocks until the dependency is reachable via nc -z."
}
}
spec = {
rules = [
{
name = "wait-for-dependencies"
match = {
any = [
{
resources = {
kinds = ["Pod"]
operations = ["CREATE"]
}
}
]
}
preconditions = {
all = [
{
key = "{{ request.object.metadata.annotations.\"dependency.kyverno.io/wait-for\" || '' }}"
operator = "NotEquals"
value = ""
}
]
}
mutate = {
foreach = [
{
list = "request.object.metadata.annotations.\"dependency.kyverno.io/wait-for\" | split(@, ',')"
patchStrategicMerge = {
spec = {
initContainers = [
{
name = "wait-for-{{ element | split(@, ':') | [0] | replace_all(@, '.', '-') }}"
image = "busybox:1.37"
command = ["sh", "-c", "until nc -z {{ element | split(@, ':') | [0] }} {{ element | split(@, ':') | [1] }}; do echo waiting for {{ element }}; sleep 2; done"]
}
]
}
}
}
]
}
}
]
}
}
}


@ -1,216 +0,0 @@
resource "kubernetes_namespace" "kyverno" {
metadata {
name = "kyverno"
labels = {
"istio-injection" : "disabled"
}
}
}
resource "helm_release" "kyverno" {
namespace = kubernetes_namespace.kyverno.metadata[0].name
create_namespace = false
name = "kyverno"
atomic = true
repository = "https://kyverno.github.io/kyverno/"
chart = "kyverno"
version = "3.6.1"
values = [yamlencode({
# When Kyverno is unavailable, allow pod creation to proceed without
# mutation/validation rather than blocking all admissions cluster-wide.
features = {
forceFailurePolicyIgnore = {
enabled = true
}
policyReports = {
enabled = false
}
}
reportsController = {
resources = {
limits = {
memory = "512Mi"
}
requests = {
cpu = "100m"
memory = "384Mi"
}
}
}
backgroundController = {
resources = {
limits = {
memory = "384Mi"
}
requests = {
cpu = "100m"
memory = "384Mi"
}
}
}
cleanupController = {
resources = {
limits = {
memory = "192Mi"
}
requests = {
cpu = "100m"
memory = "192Mi"
}
}
}
admissionController = {
replicas = 2
updateStrategy = {
type = "RollingUpdate"
rollingUpdate = {
maxSurge = 0
maxUnavailable = 1
}
}
container = {
resources = {
limits = {
memory = "256Mi"
}
requests = {
cpu = "100m"
memory = "256Mi"
}
}
}
# More tolerant liveness probe - API server slowness shouldn't kill the pod
livenessProbe = {
httpGet = {
path = "/health/liveness"
port = 9443
scheme = "HTTPS"
}
initialDelaySeconds = 15
periodSeconds = 30
timeoutSeconds = 5
failureThreshold = 4
successThreshold = 1
}
# Spread replicas across nodes for HA
topologySpreadConstraints = [
{
maxSkew = 1
topologyKey = "kubernetes.io/hostname"
whenUnsatisfiable = "DoNotSchedule"
labelSelector = {
matchLabels = {
"app.kubernetes.io/component" = "admission-controller"
"app.kubernetes.io/instance" = "kyverno"
}
}
}
]
}
})]
}
# To unlabel all:
# kubectl label deployment,statefulset,daemonset --all-namespaces -l tier tier-
#
# Uses namespaceSelector to match tiers - no API call needed.
# One rule per tier so Kyverno resolves the tier value from its informer cache.
resource "kubernetes_manifest" "mutate_tier_from_namespace" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "sync-tier-label-from-namespace"
}
spec = {
rules = [for tier in local.governance_tiers : {
name = "sync-tier-${tier}"
match = {
any = [
{
resources = {
kinds = ["Deployment", "StatefulSet", "DaemonSet"]
namespaceSelector = {
matchLabels = {
tier = tier
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
namespaces = ["kube-system", "metallb-system", "n8n"]
}
}
]
}
mutate = {
patchStrategicMerge = {
metadata = {
labels = {
"+(tier)" = tier
}
}
}
}
}]
}
}
}
# resource "kubernetes_manifest" "enforce_pod_tier_label" {
# manifest = {
# apiVersion = "kyverno.io/v1"
# kind = "ClusterPolicy"
# metadata = {
# name = "enforce-pod-tier-label"
# annotations = {
# "policies.kyverno.io/description" = "Rejects any pod that does not have a tier label."
# }
# }
# spec = {
# # 'Enforce' blocks the creation. 'Audit' just reports it.
# validationFailureAction = "Enforce"
# background = true
# rules = [
# {
# name = "check-for-tier-label"
# match = {
# any = [
# {
# resources = {
# kinds = ["Pod"]
# }
# }
# ]
# }
# validate = {
# message = "The label 'tier' is required for all pods in this cluster."
# pattern = {
# metadata = {
# labels = {
# "tier" = "?*" # The "?*" syntax means the value must not be empty
# }
# }
# }
# }
# }
# ]
# }
# }
# }


@ -1,950 +0,0 @@
# =============================================================================
# Tier-Based Resource Governance
# =============================================================================
# default (limit) = defaultRequest (request) to give Guaranteed QoS and prevent
# memory overcommit. Changed 2026-03-14 after node2 OOM crash caused by 250%
# memory overcommit (61GB limits on 24GB node).
#
# Four layers of protection against noisy neighbor issues:
# 1. PriorityClasses - critical services survive resource pressure
# 2. LimitRange defaults (Kyverno generate) - auto-inject defaults for containers without resources
# 3. ResourceQuotas (Kyverno generate) - hard ceiling on namespace resource consumption
# 4. Priority injection (Kyverno mutate) - set priorityClassName based on namespace tier label
locals {
governance_tiers = ["0-core", "1-cluster", "2-gpu", "3-edge", "4-aux"]
excluded_namespaces = ["kube-system", "metallb-system", "kyverno", "calico-system", "calico-apiserver"]
}
# -----------------------------------------------------------------------------
# Layer 1: PriorityClasses
# -----------------------------------------------------------------------------
# Values stay well below system-cluster-critical (2,000,000,000)
resource "kubernetes_priority_class" "tier_0_core" {
metadata {
name = "tier-0-core"
}
value = 1000000
global_default = false
preemption_policy = "PreemptLowerPriority"
description = "Critical infrastructure: ingress, DNS, VPN, auth, monitoring"
}
resource "kubernetes_priority_class" "tier_1_cluster" {
metadata {
name = "tier-1-cluster"
}
value = 800000
global_default = false
preemption_policy = "PreemptLowerPriority"
description = "Cluster services: Redis, metrics, security"
}
resource "kubernetes_priority_class" "tier_2_gpu" {
metadata {
name = "tier-2-gpu"
}
value = 600000
global_default = false
preemption_policy = "PreemptLowerPriority"
description = "GPU workloads: Immich, Ollama, Frigate"
}
resource "kubernetes_priority_class" "gpu_workload" {
metadata {
name = "gpu-workload"
}
value = 1200000
global_default = false
preemption_policy = "PreemptLowerPriority"
description = "GPU-pinned workloads. Higher than all user tiers. Auto-injected by Kyverno on pods requesting nvidia.com/gpu."
}
resource "kubernetes_priority_class" "tier_3_edge" {
metadata {
name = "tier-3-edge"
}
value = 400000
global_default = false
preemption_policy = "PreemptLowerPriority"
description = "User-facing services: mail, file sync, dashboards"
}
resource "kubernetes_priority_class" "tier_4_aux" {
metadata {
name = "tier-4-aux"
}
value = 200000
global_default = false
preemption_policy = "Never"
description = "Optional services: blogs, tools, experiments. Will not preempt other aux services."
}
# -----------------------------------------------------------------------------
# Layer 2: LimitRange Defaults (Kyverno Generate)
# -----------------------------------------------------------------------------
# Creates a LimitRange in each namespace based on its tier label.
# Only affects containers WITHOUT explicit resource requests/limits.
resource "kubernetes_manifest" "generate_limitrange_by_tier" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "generate-limitrange-by-tier"
annotations = {
"policies.kyverno.io/title" = "Generate LimitRange by Tier"
"policies.kyverno.io/description" = "Creates tier-appropriate LimitRange defaults in namespaces based on their tier label. Only affects containers without explicit resource specifications. Excludes namespaces with resource-governance/custom-limitrange label."
}
}
spec = {
generateExisting = true
rules = [
# Tier 0-core
{
name = "limitrange-tier-0-core"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "0-core"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-limitrange" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "LimitRange"
name = "tier-defaults"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
limits = [
{
type = "Container"
default = {
memory = "256Mi"
}
defaultRequest = {
cpu = "100m"
memory = "256Mi"
}
max = {
memory = "8Gi"
}
}
]
}
}
}
},
# Tier 1-cluster
{
name = "limitrange-tier-1-cluster"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "1-cluster"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-limitrange" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "LimitRange"
name = "tier-defaults"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
limits = [
{
type = "Container"
default = {
memory = "256Mi"
}
defaultRequest = {
cpu = "100m"
memory = "256Mi"
}
max = {
memory = "4Gi"
}
}
]
}
}
}
},
# Tier 2-gpu
{
name = "limitrange-tier-2-gpu"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "2-gpu"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-limitrange" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "LimitRange"
name = "tier-defaults"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
limits = [
{
type = "Container"
default = {
memory = "1Gi"
}
defaultRequest = {
cpu = "200m"
memory = "1Gi"
}
max = {
memory = "16Gi"
}
}
]
}
}
}
},
# Tier 3-edge - Burstable QoS: request < limit to reduce scheduler pressure
{
name = "limitrange-tier-3-edge"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "3-edge"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-limitrange" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "LimitRange"
name = "tier-defaults"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
limits = [
{
type = "Container"
default = {
memory = "192Mi"
}
defaultRequest = {
cpu = "50m"
memory = "96Mi"
}
max = {
memory = "4Gi"
}
}
]
}
}
}
},
# Tier 4-aux - Burstable QoS: request < limit to reduce scheduler pressure
{
name = "limitrange-tier-4-aux"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "4-aux"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-limitrange" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "LimitRange"
name = "tier-defaults"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
limits = [
{
type = "Container"
default = {
memory = "256Mi"
}
defaultRequest = {
cpu = "50m"
memory = "64Mi"
}
max = {
memory = "4Gi"
}
}
]
}
}
}
},
# Fallback: namespaces without a tier label get aux-level defaults
# requests = limits to prevent memory overcommit (2026-03-14 node2 OOM incident)
{
name = "limitrange-no-tier-fallback"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchExpressions = [
{
key = "tier"
operator = "Exists"
}
]
}
}
},
{
resources = {
namespaces = ["kube-system", "metallb-system", "kyverno", "calico-system", "calico-apiserver"]
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "LimitRange"
name = "tier-defaults"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
limits = [
{
type = "Container"
default = {
memory = "128Mi"
}
defaultRequest = {
cpu = "50m"
memory = "128Mi"
}
max = {
memory = "2Gi"
}
}
]
}
}
}
},
]
}
}
}
# -----------------------------------------------------------------------------
# Layer 3: ResourceQuotas (Kyverno Generate)
# -----------------------------------------------------------------------------
# Creates a ResourceQuota in each namespace based on its tier label.
# Sets hard ceiling on total namespace resource consumption.
# Namespaces with label resource-governance/custom-quota=true are excluded.
#
# IMPORTANT: LimitRange (Layer 2) must exist before ResourceQuota takes effect,
# because ResourceQuota requires all pods to have resource requests set.
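# For illustration, a namespace labelled tier=1-cluster would end up with a
# generated ResourceQuota roughly like the sketch below (derived from the
# quota-tier-1-cluster rule data further down; the YAML itself is not stored
# in this repo):
#
#   apiVersion: v1
#   kind: ResourceQuota
#   metadata:
#     name: tier-quota
#     namespace: <namespace with label tier=1-cluster>
#   spec:
#     hard:
#       requests.cpu: "4"
#       requests.memory: 4Gi
#       limits.memory: 32Gi
#       pods: "30"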
resource "kubernetes_manifest" "generate_resourcequota_by_tier" {
depends_on = [kubernetes_manifest.generate_limitrange_by_tier]
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "generate-resourcequota-by-tier"
annotations = {
"policies.kyverno.io/title" = "Generate ResourceQuota by Tier"
"policies.kyverno.io/description" = "Creates tier-appropriate ResourceQuota in namespaces based on their tier label. Excludes namespaces with resource-governance/custom-quota label."
}
}
spec = {
generateExisting = true
rules = [
# Tier 0-core
{
name = "quota-tier-0-core"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "0-core"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-quota" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "ResourceQuota"
name = "tier-quota"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
hard = {
"requests.cpu" = "8"
"requests.memory" = "8Gi"
"limits.memory" = "64Gi"
pods = "100"
}
}
}
}
},
# Tier 1-cluster
{
name = "quota-tier-1-cluster"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "1-cluster"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-quota" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "ResourceQuota"
name = "tier-quota"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
hard = {
"requests.cpu" = "4"
"requests.memory" = "4Gi"
"limits.memory" = "32Gi"
pods = "30"
}
}
}
}
},
# Tier 2-gpu
{
name = "quota-tier-2-gpu"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "2-gpu"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-quota" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "ResourceQuota"
name = "tier-quota"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
hard = {
"requests.cpu" = "8"
"requests.memory" = "8Gi"
"limits.memory" = "32Gi"
pods = "40"
}
}
}
}
},
# Tier 3-edge
{
name = "quota-tier-3-edge"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "3-edge"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-quota" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "ResourceQuota"
name = "tier-quota"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
hard = {
"requests.cpu" = "4"
"requests.memory" = "4Gi"
"limits.memory" = "32Gi"
pods = "30"
}
}
}
}
},
# Tier 4-aux
{
name = "quota-tier-4-aux"
match = {
any = [
{
resources = {
kinds = ["Namespace"]
selector = {
matchLabels = {
tier = "4-aux"
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
selector = {
matchLabels = {
"resource-governance/custom-quota" = "true"
}
}
}
}
]
}
generate = {
synchronize = true
apiVersion = "v1"
kind = "ResourceQuota"
name = "tier-quota"
namespace = "{{request.object.metadata.name}}"
data = {
spec = {
hard = {
"requests.cpu" = "2"
"requests.memory" = "2Gi"
"limits.memory" = "16Gi"
pods = "20"
}
}
}
}
},
]
}
}
}
# -----------------------------------------------------------------------------
# Layer 4: PriorityClassName Injection (Kyverno Mutate)
# -----------------------------------------------------------------------------
# Automatically sets priorityClassName on Pods based on their namespace's tier label.
# Skips pods that already have a priorityClassName set.
# Uses namespaceSelector instead of API calls, so there is no round-trip to the API server.
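# Illustrative effect (a sketch, not applied config): a Pod created in a
# namespace labelled tier=3-edge with no explicit priorityClassName is
# admitted as if it had been submitted with:
#
#   spec:
#     priorityClassName: tier-3-edge
#
# The corresponding PriorityClass objects (tier-0-core ... tier-4-aux) are
# assumed to be defined elsewhere in this stack.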
resource "kubernetes_manifest" "mutate_priority_from_tier" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "inject-priority-class-from-tier"
annotations = {
"policies.kyverno.io/title" = "Inject PriorityClass from Tier"
"policies.kyverno.io/description" = "Sets priorityClassName on Pods based on the namespace tier label. Skips pods that already have a priorityClassName."
}
}
spec = {
rules = [for tier in local.governance_tiers : {
name = "inject-priority-${tier}"
match = {
any = [
{
resources = {
kinds = ["Pod"]
operations = ["CREATE"]
namespaceSelector = {
matchLabels = {
tier = tier
}
}
}
}
]
}
exclude = {
any = [
{
resources = {
namespaces = local.excluded_namespaces
}
}
]
}
preconditions = {
all = [
{
key = "{{request.object.spec.priorityClassName || ''}}"
operator = "Equals"
value = ""
}
]
}
mutate = {
patchesJson6902 = yamlencode([
{
op = "remove"
path = "/spec/priority"
},
{
op = "remove"
path = "/spec/preemptionPolicy"
},
{
op = "add"
path = "/spec/priorityClassName"
value = "tier-${tier}"
}
])
}
}]
}
}
}
# --- ndots:2 injection ---
# Kubernetes defaults to ndots:5, which causes 4 wasted NxDomain queries per
# external DNS lookup (search domain expansion). This policy injects ndots:2
# on all pods to reduce NxDomain flood while still allowing short-name service
# resolution (e.g. "redis.redis" has 1 dot, so it still expands).
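# Illustrative effect (sketch): a mutated Pod carries
#
#   spec:
#     dnsConfig:
#       options:
#         - name: ndots
#           value: "2"
#
# which renders as "options ndots:2" in the container's /etc/resolv.conf.
# Names with two or more dots (e.g. mail.viktorbarzin.me) are then tried as
# absolute first, while one-dot names like "redis.redis" still go through the
# search domains and resolve to the in-cluster service.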
resource "kubernetes_manifest" "mutate_ndots" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "inject-ndots"
annotations = {
"policies.kyverno.io/title" = "Inject ndots:2 DNS Config"
"policies.kyverno.io/description" = "Sets ndots:2 on all Pods to reduce NxDomain query flood from search domain expansion. Skips pods that already have ndots configured."
}
}
spec = {
rules = [
{
name = "inject-ndots-2"
match = {
any = [
{
resources = {
kinds = ["Pod"]
}
}
]
}
exclude = {
any = [
{
resources = {
namespaces = ["kube-system", "metallb-system", "kyverno", "calico-system", "calico-apiserver"]
}
}
]
}
preconditions = {
all = [
{
key = "{{ request.object.spec.dnsConfig.options || `[]` | [?name == 'ndots'] | length(@) }}"
operator = "Equals"
value = "0"
}
]
}
mutate = {
patchStrategicMerge = {
spec = {
dnsConfig = {
options = [
{
name = "ndots"
value = "2"
}
]
}
}
}
}
}
]
}
}
}
# -----------------------------------------------------------------------------
# Layer 5: GPU Workload Priority Override (Kyverno Mutate)
# -----------------------------------------------------------------------------
# Overrides the tier-based priorityClassName with gpu-workload for pods that
# actually request nvidia.com/gpu resources. This ensures GPU pods can preempt
# non-GPU pods on the GPU node, regardless of namespace tier.
# Runs after Layer 4 (tier injection), so it overrides the tier-based priority.
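# Illustrative effect (sketch): a Pod that declares
#
#   resources:
#     limits:
#       nvidia.com/gpu: "1"
#
# is admitted with priorityClassName: gpu-workload (priority 1200000,
# preemptionPolicy: PreemptLowerPriority), overriding whatever tier-based
# class Layer 4 injected. The gpu-workload PriorityClass itself is assumed
# to be defined elsewhere in this stack.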
resource "kubernetes_manifest" "mutate_gpu_priority" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "inject-gpu-workload-priority"
annotations = {
"policies.kyverno.io/title" = "Inject GPU Workload Priority"
"policies.kyverno.io/description" = "Overrides priorityClassName to gpu-workload for pods requesting nvidia.com/gpu resources. Runs after tier-based injection."
}
}
spec = {
rules = [
{
name = "gpu-priority-override"
match = {
any = [
{
resources = {
kinds = ["Pod"]
operations = ["CREATE"]
}
}
]
}
exclude = {
any = [
{
resources = {
namespaces = local.excluded_namespaces
}
}
]
}
preconditions = {
any = [
{
key = "{{ request.object.spec.containers[].resources.requests.\"nvidia.com/gpu\" || '' }}"
operator = "NotEquals"
value = ""
},
{
key = "{{ request.object.spec.containers[].resources.limits.\"nvidia.com/gpu\" || '' }}"
operator = "NotEquals"
value = ""
}
]
}
mutate = {
patchesJson6902 = yamlencode([
{
op = "replace"
path = "/spec/priorityClassName"
value = "gpu-workload"
},
{
op = "replace"
path = "/spec/priority"
value = 1200000
},
{
op = "replace"
path = "/spec/preemptionPolicy"
value = "PreemptLowerPriority"
}
])
}
}
]
}
}
}

View file

@ -1,294 +0,0 @@
# =============================================================================
# Pod Security Policies (Audit Mode)
# =============================================================================
# Kyverno validate policies for pod security standards.
# All policies start in Audit mode - violations are logged but not blocked.
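# With validationFailureAction = "Audit", violations surface in Kyverno's
# policy reports instead of blocking admission. A quick way to review them
# (assuming the Kyverno chart's default reporting is enabled) before flipping
# any policy to "Enforce":
#
#   kubectl get policyreports -A
#   kubectl get clusterpolicyreports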
resource "kubernetes_manifest" "policy_deny_privileged" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "deny-privileged-containers"
annotations = {
"policies.kyverno.io/title" = "Deny Privileged Containers"
"policies.kyverno.io/category" = "Pod Security"
"policies.kyverno.io/severity" = "high"
"policies.kyverno.io/description" = "Privileged containers have full host access. Deny unless explicitly exempted."
}
}
spec = {
validationFailureAction = "Audit"
background = true
rules = [{
name = "deny-privileged"
match = {
any = [{
resources = {
kinds = ["Pod"]
}
}]
}
exclude = {
any = [{
resources = {
namespaces = ["frigate", "nvidia", "monitoring"]
}
}]
}
validate = {
message = "Privileged containers are not allowed. Use specific capabilities instead."
pattern = {
spec = {
containers = [{
"=(securityContext)" = {
"=(privileged)" = false
}
}]
"=(initContainers)" = [{
"=(securityContext)" = {
"=(privileged)" = false
}
}]
}
}
}
}]
}
}
depends_on = [helm_release.kyverno]
}
resource "kubernetes_manifest" "policy_deny_host_namespaces" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "deny-host-namespaces"
annotations = {
"policies.kyverno.io/title" = "Deny Host Namespaces"
"policies.kyverno.io/category" = "Pod Security"
"policies.kyverno.io/severity" = "high"
"policies.kyverno.io/description" = "Sharing host namespaces enables container escapes. Deny hostNetwork, hostPID, hostIPC."
}
}
spec = {
validationFailureAction = "Audit"
background = true
rules = [{
name = "deny-host-namespaces"
match = {
any = [{
resources = {
kinds = ["Pod"]
}
}]
}
exclude = {
any = [{
resources = {
namespaces = ["frigate", "monitoring"]
}
}]
}
validate = {
message = "Host namespaces (hostNetwork, hostPID, hostIPC) are not allowed."
pattern = {
spec = {
"=(hostNetwork)" = false
"=(hostPID)" = false
"=(hostIPC)" = false
}
}
}
}]
}
}
depends_on = [helm_release.kyverno]
}
resource "kubernetes_manifest" "policy_restrict_capabilities" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "restrict-sys-admin"
annotations = {
"policies.kyverno.io/title" = "Restrict SYS_ADMIN Capability"
"policies.kyverno.io/category" = "Pod Security"
"policies.kyverno.io/severity" = "high"
"policies.kyverno.io/description" = "SYS_ADMIN is nearly equivalent to root. Restrict to explicitly exempted namespaces."
}
}
spec = {
validationFailureAction = "Audit"
background = true
rules = [{
name = "restrict-sys-admin"
match = {
any = [{
resources = {
kinds = ["Pod"]
}
}]
}
exclude = {
any = [{
resources = {
namespaces = ["nvidia", "monitoring"]
}
}]
}
validate = {
message = "Adding SYS_ADMIN capability is not allowed."
deny = {
conditions = {
any = [{
key = "{{ request.object.spec.containers[].securityContext.capabilities.add[] || `[]` }}"
operator = "AnyIn"
value = ["SYS_ADMIN"]
}]
}
}
}
}]
}
}
depends_on = [helm_release.kyverno]
}
# =============================================================================
# Image Pull Policy Governance
# =============================================================================
# Mutate imagePullPolicy to IfNotPresent for all containers with pinned tags
# (non-:latest). This prevents pods from getting stuck in ImagePullBackOff
# when the pull-through cache at 10.0.20.10 has transient failures.
# For :latest or untagged images, set to Always so stale images don't persist.
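# Illustrative effect (sketch): a container using an image like
# "docker.io/library/nginx:1.27" is mutated to imagePullPolicy: IfNotPresent,
# while "docker.io/library/nginx:latest" (or an untagged "nginx") gets
# imagePullPolicy: Always. The image names here are examples only, not
# workloads deployed by this stack.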
resource "kubernetes_manifest" "policy_set_image_pull_policy" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "set-image-pull-policy"
annotations = {
"policies.kyverno.io/title" = "Set Image Pull Policy"
"policies.kyverno.io/category" = "Best Practices"
"policies.kyverno.io/severity" = "medium"
"policies.kyverno.io/description" = "Set imagePullPolicy to IfNotPresent for pinned tags and Always for :latest to prevent ImagePullBackOff from transient cache failures."
}
}
spec = {
background = false
rules = [
{
name = "set-ifnotpresent-for-pinned-tags"
match = {
any = [{
resources = {
kinds = ["Pod"]
}
}]
}
mutate = {
foreach = [{
list = "request.object.spec.containers"
preconditions = {
all = [{
key = "{{ ends_with(element.image, ':latest') || !contains(element.image, ':') }}"
operator = "Equals"
value = false
}]
}
patchStrategicMerge = {
spec = {
containers = [{
name = "{{ element.name }}"
imagePullPolicy = "IfNotPresent"
}]
}
}
}]
}
},
{
name = "set-always-for-latest"
match = {
any = [{
resources = {
kinds = ["Pod"]
}
}]
}
mutate = {
foreach = [{
list = "request.object.spec.containers"
preconditions = {
all = [{
key = "{{ ends_with(element.image, ':latest') || !contains(element.image, ':') }}"
operator = "Equals"
value = true
}]
}
patchStrategicMerge = {
spec = {
containers = [{
name = "{{ element.name }}"
imagePullPolicy = "Always"
}]
}
}
}]
}
}
]
}
}
depends_on = [helm_release.kyverno]
}
resource "kubernetes_manifest" "policy_require_trusted_registries" {
manifest = {
apiVersion = "kyverno.io/v1"
kind = "ClusterPolicy"
metadata = {
name = "require-trusted-registries"
annotations = {
"policies.kyverno.io/title" = "Require Trusted Image Registries"
"policies.kyverno.io/category" = "Pod Security"
"policies.kyverno.io/severity" = "medium"
"policies.kyverno.io/description" = "Images must come from trusted registries to prevent supply chain attacks."
}
}
spec = {
validationFailureAction = "Audit"
background = true
rules = [{
name = "validate-registries"
match = {
any = [{
resources = {
kinds = ["Pod"]
}
}]
}
validate = {
message = "Images must be from trusted registries (docker.io, ghcr.io, quay.io, registry.k8s.io, or local cache)."
pattern = {
spec = {
containers = [{
image = "docker.io/* | ghcr.io/* | quay.io/* | registry.k8s.io/* | 10.0.20.10* | */*"
}]
}
}
}
}]
}
}
depends_on = [helm_release.kyverno]
}

View file

@ -1,5 +0,0 @@
firmly-gerardo-generated@viktorbarzin.me me@viktorbarzin.me
closely-keith-generated@viktorbarzin.me vbarzin@gmail.com
literally-paolo-generated@viktorbarzin.me viktorbarzin@fb.com
hastily-stefanie-generated@viktorbarzin.me elliestamenova@gmail.com
vaultwarden@viktorbarzin.me me@viktorbarzin.me

View file

@ -1,504 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "mailserver_accounts" {}
variable "postfix_account_aliases" {}
variable "opendkim_key" {}
variable "sasl_passwd" {} # For sendgrid i.e relayhost
variable "nfs_server" { type = string }
resource "kubernetes_namespace" "mailserver" {
metadata {
name = "mailserver"
labels = {
tier = var.tier
}
# connecting via localhost does not seem to work?
# labels = {
# "istio-injection" : "enabled"
# }
}
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.mailserver.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_config_map" "mailserver_env_config" {
metadata {
name = "mailserver.env.config"
namespace = kubernetes_namespace.mailserver.metadata[0].name
labels = {
app = "mailserver"
}
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
DMS_DEBUG = "0"
# LOG_LEVEL = "debug"
ENABLE_CLAMAV = "0"
ENABLE_AMAVIS = "0"
ENABLE_FAIL2BAN = "0"
ENABLE_FETCHMAIL = "0"
ENABLE_POSTGREY = "0"
ENABLE_SASLAUTHD = "0"
ENABLE_SPAMASSASSIN = "0"
ENABLE_RSPAMD = "1"
ENABLE_OPENDKIM = "0"
ENABLE_OPENDMARC = "0"
RSPAMD_LEARN = "1"
ENABLE_SRS = "1"
FETCHMAIL_POLL = "120"
ONE_DIR = "1"
OVERRIDE_HOSTNAME = "mail.viktorbarzin.me"
POSTFIX_MESSAGE_SIZE_LIMIT = 1024 * 1024 * 200 # 200 MB
POSTFIX_REJECT_UNKNOWN_CLIENT_HOSTNAME = "1"
# TLS_LEVEL = "intermediate"
# DEFAULT_RELAY_HOST = "[smtp.sendgrid.net]:587"
DEFAULT_RELAY_HOST = "[smtp.eu.mailgun.org]:587"
SPOOF_PROTECTION = "1"
SSL_TYPE = "manual"
SSL_CERT_PATH = "/tmp/ssl/tls.crt"
SSL_KEY_PATH = "/tmp/ssl/tls.key"
}
}
resource "kubernetes_config_map" "mailserver_config" {
metadata {
name = "mailserver.config"
namespace = kubernetes_namespace.mailserver.metadata[0].name
labels = {
app = "mailserver"
}
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
# Actual mail settings
"postfix-accounts.cf" = join("\n", [for user, pass in var.mailserver_accounts : "${user}|${bcrypt(pass, 6)}"])
"postfix-main.cf" = var.postfix_cf
"postfix-virtual.cf" = format("%s%s", var.postfix_account_aliases, file("${path.module}/extra/aliases.txt"))
KeyTable = "mail._domainkey.viktorbarzin.me viktorbarzin.me:mail:/etc/opendkim/keys/viktorbarzin.me-mail.key\n"
SigningTable = "*@viktorbarzin.me mail._domainkey.viktorbarzin.me\n"
TrustedHosts = "127.0.0.1\nlocalhost\n"
"sasl_passwd" = var.sasl_passwd
# Rspamd DKIM signing configuration
"dkim_signing.conf" = <<-EOF
enabled = true;
sign_authenticated = true;
sign_local = true;
use_domain = "header";
use_redis = false;
use_esld = true;
selector = "mail";
path = "/tmp/docker-mailserver/rspamd/dkim/viktorbarzin.me/mail.private";
domain {
viktorbarzin.me {
path = "/tmp/docker-mailserver/rspamd/dkim/viktorbarzin.me/mail.private";
selector = "mail";
}
}
EOF
fail2ban_conf = <<-EOF
[DEFAULT]
#logtarget = /var/log/fail2ban.log
logtarget = SYSOUT
EOF
}
# Password hashes come out different on every run, which would churn this config map constantly.
# Either 1. create consistent hashes or 2. find a way to apply ignore_changes per password.
lifecycle {
ignore_changes = [data["postfix-accounts.cf"]]
}
}
# resource "kubernetes_config_map" "user_patches" {
# metadata {
# name = "user-patches"
# namespace = kubernetes_namespace.mailserver.metadata[0].name
# labels = {
# "app" = "mailserver"
# }
# }
# data = {
# user_patches = <<EOF
# #!/bin/bash
# cp -f /tmp/dovecot.key /etc/dovecot/ssl/dovecot.key
# cp -f /tmp/dovecot.crt /etc/dovecot/ssl/dovecot.pem
# EOF
# }
# }
resource "kubernetes_secret" "opendkim_key" {
metadata {
name = "mailserver.opendkim.key"
namespace = kubernetes_namespace.mailserver.metadata[0].name
labels = {
"app" = "mailserver"
}
}
type = "Opaque"
data = {
"viktorbarzin.me-mail.key" = var.opendkim_key
}
}
module "nfs_data" {
source = "../../../../modules/kubernetes/nfs_volume"
name = "mailserver-data"
namespace = kubernetes_namespace.mailserver.metadata[0].name
nfs_server = var.nfs_server
nfs_path = "/mnt/main/mailserver"
}
resource "kubernetes_deployment" "mailserver" {
metadata {
name = "mailserver"
namespace = kubernetes_namespace.mailserver.metadata[0].name
labels = {
"app" = "mailserver"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = "1"
strategy {
type = "Recreate"
}
selector {
match_labels = {
"app" = "mailserver"
}
}
template {
metadata {
annotations = {
# "diun.enable" = "true"
}
labels = {
"app" = "mailserver"
"role" = "mail"
}
}
spec {
container {
name = "docker-mailserver"
image = "docker.io/mailserver/docker-mailserver:15.0.0"
image_pull_policy = "IfNotPresent"
security_context {
capabilities {
add = ["NET_ADMIN"]
}
}
lifecycle {
post_start {
exec {
command = [
"postmap",
"/etc/postfix/sasl/passwd"
# "/bin/sh",
# "-c",
# "cp -f /tmp/user-patches.sh /tmp/docker-mailserver/user-patches.sh && chown root:root /var/log/mail && chmod 755 /var/log/mail",
]
}
}
}
volume_mount {
name = "config-tls"
mount_path = "/tmp/ssl/tls.key"
sub_path = "tls.key"
read_only = true
}
volume_mount {
name = "config-tls"
mount_path = "/tmp/ssl/tls.crt"
sub_path = "tls.crt"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/postfix-accounts.cf"
sub_path = "postfix-accounts.cf"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/postfix-main.cf"
sub_path = "postfix-main.cf"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/postfix-virtual.cf"
sub_path = "postfix-virtual.cf"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/fetchmail.cf"
sub_path = "fetchmail.cf"
read_only = true
}
# volume_mount {
# name = "config"
# mount_path = "/tmp/docker-mailserver/dovecot.cf"
# sub_path = "dovecot.cf"
# read_only = true
# }
# volume_mount {
# name = "user-patches"
# mount_path = "/tmp/user-patches.sh"
# sub_path = "user-patches.sh"
# read_only = true
# }
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/opendkim/SigningTable"
sub_path = "SigningTable"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/opendkim/KeyTable"
sub_path = "KeyTable"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/opendkim/TrustedHosts"
sub_path = "TrustedHosts"
read_only = true
}
volume_mount {
name = "opendkim-key"
mount_path = "/tmp/docker-mailserver/opendkim/keys"
read_only = true
}
volume_mount {
name = "opendkim-key"
mount_path = "/tmp/docker-mailserver/rspamd/dkim/viktorbarzin.me/mail.private"
sub_path = "viktorbarzin.me-mail.key"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/tmp/docker-mailserver/rspamd/override.d/dkim_signing.conf"
sub_path = "dkim_signing.conf"
read_only = true
}
volume_mount {
name = "data"
mount_path = "/var/mail"
sub_path = "data"
}
volume_mount {
name = "data"
mount_path = "/var/mail-state"
sub_path = "state"
}
volume_mount {
name = "data"
mount_path = "/var/log/mail"
sub_path = "log"
}
volume_mount {
name = "var-run-dovecot"
mount_path = "/var/run/dovecot"
}
volume_mount {
name = "config"
mount_path = "/etc/postfix/sasl/passwd"
sub_path = "sasl_passwd"
read_only = true
}
volume_mount {
name = "config"
mount_path = "/etc/fail2ban/fail2ban.local"
sub_path = "fail2ban_conf"
read_only = true
}
port {
name = "smtp"
container_port = 25
protocol = "TCP"
}
port {
name = "smtp-secure"
container_port = 465
protocol = "TCP"
}
port {
name = "smtp-auth"
container_port = 587
protocol = "TCP"
}
port {
name = "imap-secure"
container_port = 993
protocol = "TCP"
}
env_from {
config_map_ref {
name = "mailserver.env.config"
}
}
resources {
requests = {
cpu = "25m"
memory = "512Mi"
}
limits = {
memory = "512Mi"
}
}
}
container {
name = "dovecot-exporter"
image = "viktorbarzin/dovecot_exporter:latest"
command = [
"/dovecot_exporter/exporter",
"--dovecot.socket-path=/var/run/dovecot/stats-reader"
]
image_pull_policy = "IfNotPresent"
port {
name = "dovecotexporter"
container_port = 9166
protocol = "TCP"
}
volume_mount {
name = "var-run-dovecot"
mount_path = "/var/run/dovecot"
}
resources {
requests = {
cpu = "10m"
memory = "32Mi"
}
limits = {
memory = "32Mi"
}
}
}
volume {
name = "config"
config_map {
name = "mailserver.config"
}
}
volume {
name = "config-tls"
secret {
secret_name = var.tls_secret_name
}
}
volume {
name = "opendkim-key"
secret {
secret_name = "mailserver.opendkim.key"
}
}
volume {
name = "data"
persistent_volume_claim {
claim_name = module.nfs_data.claim_name
}
# iscsi {
# target_portal = "iscsi.viktorbarzin.lan:3260"
# iqn = "iqn.2020-12.lan.viktorbarzin:storage:mailserver"
# lun = 0
# fs_type = "ext4"
# }
}
# volume {
# name = "user-patches"
# config_map {
# name = "user-patches"
# }
# }
volume {
name = "var-run-dovecot"
empty_dir {}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "mailserver" {
metadata {
name = "mailserver"
namespace = kubernetes_namespace.mailserver.metadata[0].name
labels = {
app = "mailserver"
}
annotations = {
"metallb.io/loadBalancerIPs" = "10.0.20.200"
"metallb.io/allow-shared-ip" = "shared"
}
}
spec {
type = "LoadBalancer"
external_traffic_policy = "Cluster"
selector = {
app = "mailserver"
}
port {
name = "smtp"
protocol = "TCP"
port = 25
target_port = "smtp"
}
port {
name = "smtp-secure"
protocol = "TCP"
port = 465
target_port = "smtp-secure"
}
port {
name = "smtp-auth"
protocol = "TCP"
port = 587
target_port = "smtp-auth"
}
port {
name = "imap-secure"
protocol = "TCP"
port = 993
target_port = "imap-secure"
}
}
}

View file

@ -1,237 +0,0 @@
variable "roundcube_db_password" {
type = string
sensitive = true
}
variable "mysql_host" { type = string }
module "nfs_roundcube_html" {
source = "../../../../modules/kubernetes/nfs_volume"
name = "roundcubemail-html"
namespace = kubernetes_namespace.mailserver.metadata[0].name
nfs_server = var.nfs_server
nfs_path = "/mnt/main/roundcubemail/html"
}
module "nfs_roundcube_enigma" {
source = "../../../../modules/kubernetes/nfs_volume"
name = "roundcubemail-enigma"
namespace = kubernetes_namespace.mailserver.metadata[0].name
nfs_server = var.nfs_server
nfs_path = "/mnt/main/roundcubemail/enigma"
}
# If you want to override settings, mount this in /var/roundcube/config
# More info at https://github.com/roundcube/roundcubemail-docker?tab=readme-ov-file
# resource "kubernetes_config_map" "roundcubemail_config" {
# metadata {
# name = "roundcubemail.config"
# namespace = "mailserver"
# labels = {
# app = "mailserver"
# }
# annotations = {
# "reloader.stakater.com/match" = "true"
# }
# }
# data = {
# # if you want to override things see https://github.com/roundcube/roundcubemail/blob/master/config/defaults.inc.php
# "imap.php" = <<-EOF
# <?php
# $config['imap_host'] = 'ssl://mail.viktorbarzin.me:993';
# ?>
# EOF
# }
# }
resource "kubernetes_deployment" "roundcubemail" {
metadata {
name = "roundcubemail"
namespace = "mailserver"
labels = {
"app" = "roundcubemail"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = "1"
strategy {
type = "RollingUpdate"
}
selector {
match_labels = {
"app" = "roundcubemail"
}
}
template {
metadata {
labels = {
"app" = "roundcubemail"
}
}
spec {
container {
name = "roundcube"
image = "roundcube/roundcubemail:1.6.13-apache"
# Uncomment me to mount additional settings
# volume_mount {
# name = "imap-config"
# mount_path = "/var/roundcube/config/imap.php"
# sub_path = "imap.php"
# }
env {
name = "ROUNDCUBEMAIL_DEFAULT_HOST"
value = "ssl://mail.viktorbarzin.me" # tls cert must be valid!
}
env {
name = "ROUNDCUBEMAIL_DEFAULT_PORT"
value = "993"
}
env {
name = "ROUNDCUBEMAIL_SMTP_SERVER"
value = "tls://mail.viktorbarzin.me" # tls cert must be valid!
}
env {
name = "ROUNDCUBEMAIL_SMTP_PORT"
value = "587"
}
# DB Settings
env {
name = "ROUNDCUBEMAIL_DB_TYPE"
value = "mysql"
}
env {
name = "ROUNDCUBEMAIL_DB_HOST"
value = var.mysql_host
}
env {
name = "ROUNDCUBEMAIL_DB_USER"
value = "roundcubemail"
}
env {
name = "ROUNDCUBEMAIL_DB_PASSWORD"
value = var.roundcube_db_password
}
# Plugins
env {
name = "ROUNDCUBEMAIL_COMPOSER_PLUGINS"
value = "mmvi/twofactor_webauthn,texxasrulez/persistent_login,dsoares/rcguard"
}
env {
name = "ROUNDCUBEMAIL_PLUGINS"
value = "attachment_reminder,database_attachments,enigma,twofactor_webauthn,persistent_login,rcguard"
}
env {
name = "ROUNDCUBEMAIL_SMTP_DEBUG"
value = "false"
}
env {
name = "ROUNDCUBEMAIL_DEBUG_LEVEL"
value = "1"
}
env {
name = "ROUNDCUBEMAIL_LOG_DRIVER"
# value = "file"
value = "syslog"
}
port {
name = "web"
container_port = 80
protocol = "TCP"
}
volume_mount {
name = "html"
mount_path = "/var/www/html"
}
volume_mount {
name = "enigma"
mount_path = "/var/roundcube/enigma"
}
resources {
requests = {
cpu = "25m"
memory = "192Mi"
}
limits = {
memory = "192Mi"
}
}
}
# volume {
# name = "imap-config"
# config_map {
# name = "roundcubemail.config"
# }
# }
volume {
name = "html"
persistent_volume_claim {
claim_name = module.nfs_roundcube_html.claim_name
}
}
volume {
name = "enigma"
persistent_volume_claim {
claim_name = module.nfs_roundcube_enigma.claim_name
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "roundcubemail" {
metadata {
name = "roundcubemail"
namespace = "mailserver"
labels = {
app = "roundcubemail"
}
}
spec {
selector = {
app = "roundcubemail"
}
port {
name = "roundcube"
protocol = "TCP"
port = 80
}
}
}
module "ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = "mailserver"
name = "mail"
service_name = "roundcubemail"
tls_secret_name = var.tls_secret_name
rybbit_site_id = "082f164faa7d"
extra_annotations = {
"gethomepage.dev/enabled" = "true"
"gethomepage.dev/name" = "Roundcube Mail"
"gethomepage.dev/description" = "Webmail client"
"gethomepage.dev/icon" = "roundcube.png"
"gethomepage.dev/group" = "Other"
"gethomepage.dev/pod-selector" = ""
}
}

View file

@ -1,163 +0,0 @@
# This is appended to and merged with the main Postfix main.cf.
# See defaults: https://github.com/docker-mailserver/docker-mailserver/blob/master/target/postfix/main.cf
variable "postfix_cf" {
default = <<EOT
#relayhost = [smtp.sendgrid.net]:587
relayhost = [smtp.eu.mailgun.org]:587
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl/passwd
smtp_sasl_security_options = noanonymous
smtp_sasl_tls_security_options = noanonymous
smtp_tls_security_level = encrypt
smtpd_tls_cert_file=/tmp/ssl/tls.crt
smtpd_tls_key_file=/tmp/ssl/tls.key
smtpd_use_tls=yes
header_size_limit = 4096000
# Debug mail tls
smtpd_tls_loglevel = 1
#smtpd_tls_ciphers = TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:!aNULL:!SEED:!CAMELLIA:!RSA+AES:!SHA1
#tls_medium_cipherlist = ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:!aNULL:!SEED:!CAMELLIA:!RSA+AES:!SHA1
# Rate limiting (brute-force protection)
smtpd_client_connection_rate_limit = 10
smtpd_client_message_rate_limit = 30
anvil_rate_time_unit = 60s
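# Sketch of what the two limits above mean in practice (a reading of the
# Postfix anvil rate-limit parameters, not additional configuration): a single
# client may make at most 10 connection attempts and submit at most 30
# messages per 60-second window.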
EOT
}
variable "postfix_cf_reference_DO_NOT_USE" {
default = <<EOT
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
smtpd_banner = $myhostname ESMTP $mail_name (Debian)
biff = no
append_dot_mydomain = no
readme_directory = no
# Basic configuration
# myhostname =
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost.$mydomain, localhost
mynetworks = 127.0.0.0/8 [::1]/128 [fe80::]/64
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = ipv4
# TLS parameters
smtpd_tls_cert_file=/tmp/ssl/tls.crt
smtpd_tls_key_file=/tmp/ssl/tls.key
#smtpd_tls_CAfile=
#smtp_tls_CAfile=
smtpd_tls_security_level = may
smtpd_use_tls=yes
smtpd_tls_loglevel = 1
smtp_tls_loglevel = 1
tls_ssl_options = NO_COMPRESSION
tls_high_cipherlist = ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS
tls_preempt_cipherlist = yes
smtpd_tls_protocols = !SSLv2,!SSLv3
smtp_tls_protocols = !SSLv2,!SSLv3
smtpd_tls_mandatory_ciphers = high
smtpd_tls_mandatory_protocols = !SSLv2,!SSLv3
smtpd_tls_exclude_ciphers = aNULL, LOW, EXP, MEDIUM, ADH, AECDH, MD5, DSS, ECDSA, CAMELLIA128, 3DES, CAMELLIA256, RSA+AES, eNULL
smtpd_tls_dh1024_param_file = /etc/postfix/dhparams.pem
smtpd_tls_CApath = /etc/ssl/certs
smtp_tls_CApath = /etc/ssl/certs
# Settings to prevent SPAM early
smtpd_helo_required = yes
smtpd_delay_reject = yes
smtpd_helo_restrictions = permit_mynetworks, reject_invalid_helo_hostname, permit
#smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
#smtpd_relay_restrictions = reject_sender_login_mismatch permit_sasl_authenticated permit_mynetworks defer_unauth_destination
smtpd_relay_restrictions = reject_sender_login_mismatch permit_sasl_authenticated permit_mynetworks defer_unauth_destination
smtpd_recipient_restrictions = permit_sasl_authenticated, reject_unauth_destination, reject_unauth_pipelining, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, reject_unknown_recipient_domain, reject_rbl_client bl.spamcop.net, permit_mynetworks
smtpd_client_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_unauth_destination, reject_unauth_pipelining
#smtpd_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, permit_mynetworks, reject_unknown_sender_domain
smtpd_sender_restrictions = reject_sender_login_mismatch, reject_authenticated_sender_login_mismatch, reject_unknown_sender_domain, permit_sasl_authenticated, permit_mynetworks
disable_vrfy_command = yes
# Postscreen settings to drop zombies/open relays/spam early
#postscreen_dnsbl_action = enforce
postscreen_dnsbl_action = ignore
postscreen_dnsbl_sites = zen.spamhaus.org*2
bl.mailspike.net
b.barracudacentral.org*2
bl.spameatingmonkey.net
bl.spamcop.net
dnsbl.sorbs.net
psbl.surriel.com
list.dnswl.org=127.0.[0..255].0*-2
list.dnswl.org=127.0.[0..255].1*-3
list.dnswl.org=127.0.[0..255].[2..3]*-4
postscreen_dnsbl_threshold = 3
postscreen_dnsbl_whitelist_threshold = -1
postscreen_greet_action = enforce
postscreen_bare_newline_action = enforce
# SASL
smtpd_sasl_auth_enable = no
#smtpd_sasl_auth_enable = yes
##smtpd_sasl_path = /var/spool/postfix/private/auth
#smtpd_sasl_path = /var/spool/postfix/private/smtpd
##smtpd_sasl_type = dovecot
#smtpd_sasl_type = dovecot
##smtpd_sasl_security_options = noanonymous
#smtpd_sasl_security_options = noanonymous
##smtpd_sasl_local_domain = $mydomain
##broken_sasl_auth_clients = yes
#broken_sasl_auth_clients = yes
# SMTP configuration
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl/passwd
smtp_sasl_security_options = noanonymous
smtp_sasl_tls_security_options = noanonymous
smtp_tls_security_level = encrypt
header_size_limit = 4096000
relayhost = [smtp.sendgrid.net]:587
# Mail directory
virtual_transport = lmtp:unix:/var/run/dovecot/lmtp
virtual_mailbox_domains = /etc/postfix/vhost
virtual_mailbox_maps = texthash:/etc/postfix/vmailbox
virtual_alias_maps = texthash:/etc/postfix/virtual
# Additional option for filtering
content_filter = smtp-amavis:[127.0.0.1]:10024
# Milters used by DKIM
milter_protocol = 6
milter_default_action = accept
dkim_milter = inet:localhost:8891
dmarc_milter = inet:localhost:8893
smtpd_milters = $dkim_milter,$dmarc_milter
non_smtpd_milters = $dkim_milter
# SPF policy settings
policyd-spf_time_limit = 3600
# Header checks for content inspection on receiving
header_checks = pcre:/etc/postfix/maps/header_checks.pcre
# Remove unwanted headers that reveal private information
smtp_header_checks = pcre:/etc/postfix/maps/sender_header_filter.pcre
myhostname = mail.viktorbarzin.me
mydomain = viktorbarzin.me
smtputf8_enable = no
message_size_limit = 20480000
sender_canonical_maps = tcp:localhost:10001
sender_canonical_classes = envelope_sender
recipient_canonical_maps = tcp:localhost:10002
recipient_canonical_classes = envelope_recipient,header_recipient
compatibility_level = 2
# enable_original_recipient = no # before uncommenting see https://serverfault.com/questions/661615/how-to-drop-orig-to-using-postfix-virtual-domains
always_add_missing_headers = yes
anvil_status_update_time = 5s
EOT
}

View file

@ -1,40 +0,0 @@
# Creates the namespace and everything needed
# Do not use until https://github.com/colinwilson/terraform-kubernetes-metallb/issues/5 is solved
# module "metallb" {
# source = "colinwilson/metallb/kubernetes"
# version = "0.1.7"
# }
variable "tier" { type = string }
resource "kubernetes_namespace" "metallb" {
metadata {
name = "metallb-system"
labels = {
app = "metallb"
# "istio-injection" : "disabled"
# tier = var.tier
}
}
}
module "metallb" {
source = "ViktorBarzin/metallb/kubernetes"
version = "0.1.5"
depends_on = [kubernetes_namespace.metallb]
}
resource "kubernetes_config_map" "config" {
metadata {
name = "config"
namespace = kubernetes_namespace.metallb.metadata[0].name
}
data = {
config = <<EOT
address-pools:
- name: default
protocol: layer2
addresses:
- 10.0.20.200-10.0.20.220
EOT
}
}

View file

@ -1,29 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
resource "kubernetes_namespace" "metrics-server" {
metadata {
name = "metrics-server"
labels = {
tier = var.tier
}
}
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.metrics-server.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "helm_release" "metrics-server" {
namespace = kubernetes_namespace.metrics-server.metadata[0].name
create_namespace = false
name = "metrics-server"
atomic = true
repository = "https://kubernetes-sigs.github.io/metrics-server/"
chart = "metrics-server"
values = [templatefile("${path.module}/values.yaml", {})]
}

View file

@ -1,8 +0,0 @@
args:
- "--kubelet-insecure-tls"
resources:
requests:
cpu: 50m
memory: 200Mi
limits:
memory: 200Mi

View file

@ -1,27 +0,0 @@
# dockerhub: viktorbarzin/redfish-exporter
# repo: https://pkg.go.dev/github.com/jenningsloy318/redfish_exporter#section-readme
FROM golang:rc-bullseye AS builder
LABEL maintainer="Viktor Barzin <me@viktorbarzin.me>"
ARG ARCH=amd64
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH "$GOROOT/bin:$GOPATH/bin:$PATH"
ENV GO_VERSION 1.15.2
ENV GO111MODULE=on
# Build dependencies
RUN mkdir -p /go/src/github.com/ && \
git clone https://github.com/jenningsloy318/redfish_exporter /go/src/github.com/jenningsloy318/redfish_exporter && \
cd /go/src/github.com/jenningsloy318/redfish_exporter && \
make build
FROM golang:rc-bullseye
COPY --from=builder /go/src/github.com/jenningsloy318/redfish_exporter/build/redfish_exporter /usr/local/bin/redfish_exporter
RUN mkdir /etc/prometheus
# config file is mounted at runtime
CMD ["/usr/local/bin/redfish_exporter", "--config.file", "/etc/prometheus/redfish_exporter.yml"]

View file

@ -1,207 +0,0 @@
alloy:
configMap:
content: |-
// Write your Alloy config here:
logging {
level = "info"
format = "logfmt"
}
loki.write "default" {
endpoint {
url = "http://loki.monitoring.svc.cluster.local:3100/loki/api/v1/push"
}
}
// discovery.kubernetes allows you to find scrape targets from Kubernetes resources.
// It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.
discovery.kubernetes "pod" {
role = "pod"
}
// discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.
// If no rules are defined, then the input targets are exported as-is.
discovery.relabel "pod_logs" {
targets = discovery.kubernetes.pod.targets
// Label creation - "namespace" field from "__meta_kubernetes_namespace"
rule {
source_labels = ["__meta_kubernetes_namespace"]
action = "replace"
target_label = "namespace"
}
// Label creation - "pod" field from "__meta_kubernetes_pod_name"
rule {
source_labels = ["__meta_kubernetes_pod_name"]
action = "replace"
target_label = "pod"
}
// Label creation - "container" field from "__meta_kubernetes_pod_container_name"
rule {
source_labels = ["__meta_kubernetes_pod_container_name"]
action = "replace"
target_label = "container"
}
// Label creation - "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name"
rule {
source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"]
action = "replace"
target_label = "app"
}
// Label creation - "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name"
// Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name
rule {
source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"]
action = "replace"
target_label = "job"
separator = "/"
replacement = "$1"
}
// Label creation - "container" field from "__meta_kubernetes_pod_uid" and "__meta_kubernetes_pod_container_name"
// Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log
rule {
source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
action = "replace"
target_label = "__path__"
separator = "/"
replacement = "/var/log/pods/*$1/*.log"
}
// Label creation - "container_runtime" field from "__meta_kubernetes_pod_container_id"
rule {
source_labels = ["__meta_kubernetes_pod_container_id"]
action = "replace"
target_label = "container_runtime"
regex = "^(\\S+):\\/\\/.+$"
replacement = "$1"
}
}
// loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API.
loki.source.kubernetes "pod_logs" {
targets = discovery.relabel.pod_logs.output
forward_to = [loki.process.pod_logs.receiver]
}
// loki.process receives log entries from other Loki components, applies one or more processing stages,
// and forwards the results to the list of receivers in the component's arguments.
loki.process "pod_logs" {
stage.static_labels {
values = {
cluster = "default",
}
}
forward_to = [loki.write.default.receiver]
}
// Node-level journal log collection for kernel panics, OOMs, hung tasks, etc.
// Ships system logs off-node so they survive hard resets.
loki.source.journal "node_journal" {
forward_to = [loki.process.journal.receiver]
relabel_rules = loki.relabel.journal.rules
labels = {
job = "node-journal",
}
max_age = "12h"
}
loki.relabel "journal" {
forward_to = []
rule {
source_labels = ["__journal__hostname"]
target_label = "node"
}
rule {
source_labels = ["__journal__systemd_unit"]
target_label = "unit"
}
rule {
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
rule {
source_labels = ["__journal__transport"]
target_label = "transport"
}
}
// Forward warning+ journal entries (priority 0-4: emerg, alert, crit, err, warning)
// Also forwards kernel transport entries regardless of priority for OOM/panic detection.
loki.process "journal" {
stage.static_labels {
values = {
cluster = "default",
}
}
// Drop info/debug/notice entries that aren't from the kernel transport
stage.match {
selector = "{job=\"node-journal\", level=~\"info|notice|debug\", transport!=\"kernel\"}"
action = "drop"
}
forward_to = [loki.write.default.receiver]
}
// Kubernetes audit log collection from /var/log/kubernetes/audit.log
// Requires alloy.mounts.varlog=true to mount /var/log from the host
local.file_match "audit_logs" {
path_targets = [{
__path__ = "/var/log/kubernetes/audit.log",
job = "kubernetes-audit",
node = env("HOSTNAME"),
}]
}
loki.source.file "audit_logs" {
targets = local.file_match.audit_logs.targets
forward_to = [loki.write.default.receiver]
}
# Mount /var/log from the host for file-based log collection (audit logs)
mounts:
varlog: true
# Mount journal directories for loki.source.journal
extra:
- name: journal-run
mountPath: /run/log/journal
readOnly: true
- name: journal-var
mountPath: /var/log/journal
readOnly: true
- name: machine-id
mountPath: /etc/machine-id
readOnly: true
controller:
volumes:
extra:
- name: journal-run
hostPath:
path: /run/log/journal
type: DirectoryOrCreate
- name: journal-var
hostPath:
path: /var/log/journal
type: DirectoryOrCreate
- name: machine-id
hostPath:
path: /etc/machine-id
type: File
# Resource limits for DaemonSet pods
# Alloy tails logs from all containers on the node via K8s API and batches
# them to Loki. Memory scales with number of active log streams (~30-50 per node).
# A 128Mi limit was OOMKilled; steady-state usage is ~400-450Mi per pod.
resources:
requests:
cpu: 50m
memory: 512Mi
limits:
memory: 1Gi

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,204 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": { "type": "datasource", "uid": "grafana" },
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Kubernetes API server audit logs from Loki",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": 0,
"links": [],
"panels": [
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
"id": 100,
"panels": [],
"title": "Recent Activity",
"type": "row"
},
{
"datasource": { "type": "loki", "uid": "P8E80F9AEF21F6940" },
"description": "Recent Kubernetes API actions from audit logs",
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"custom": {
"align": "auto",
"cellOptions": { "type": "auto" },
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
}
},
"overrides": []
},
"gridPos": { "h": 12, "w": 24, "x": 0, "y": 1 },
"id": 1,
"options": {
"cellHeight": "sm",
"footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false },
"showHeader": true,
"sortBy": [{ "desc": true, "displayName": "Time" }]
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": { "type": "loki", "uid": "P8E80F9AEF21F6940" },
"editorMode": "code",
"expr": "{job=\"kubernetes-audit\"} | json | line_format \"{{.user.username}} {{.verb}} {{.objectRef.resource}} {{.objectRef.namespace}}\"",
"legendFormat": "",
"queryType": "range",
"refId": "A"
}
],
"title": "Recent Actions",
"type": "table"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 13 },
"id": 101,
"panels": [],
"title": "Request Rates",
"type": "row"
},
{
"datasource": { "type": "loki", "uid": "P8E80F9AEF21F6940" },
"description": "API request count by user over time",
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"insertNulls": false,
"lineInterpolation": "smooth",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "never",
"spanNulls": false,
"stacking": { "group": "A", "mode": "none" },
"thresholdsStyle": { "mode": "off" }
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [{ "color": "green", "value": null }]
},
"unit": "short"
},
"overrides": []
},
"gridPos": { "h": 10, "w": 24, "x": 0, "y": 14 },
"id": 2,
"options": {
"legend": { "calcs": ["sum", "lastNotNull"], "displayMode": "table", "placement": "bottom", "showLegend": true },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": { "type": "loki", "uid": "P8E80F9AEF21F6940" },
"editorMode": "code",
"expr": "sum by (user_username) (count_over_time({job=\"kubernetes-audit\"} | json [5m]))",
"legendFormat": "{{user_username}}",
"queryType": "range",
"refId": "A"
}
],
"title": "Request Count by User",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 24 },
"id": 102,
"panels": [],
"title": "Denied Requests",
"type": "row"
},
{
"datasource": { "type": "loki", "uid": "P8E80F9AEF21F6940" },
"description": "API requests denied with HTTP 403+ status codes",
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"custom": {
"align": "auto",
"cellOptions": { "type": "auto" },
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{ "color": "green", "value": null },
{ "color": "red", "value": 403 }
]
}
},
"overrides": []
},
"gridPos": { "h": 12, "w": 24, "x": 0, "y": 25 },
"id": 3,
"options": {
"cellHeight": "sm",
"footer": { "countRows": false, "fields": "", "reducer": ["sum"], "show": false },
"showHeader": true,
"sortBy": [{ "desc": true, "displayName": "Time" }]
},
"pluginVersion": "12.3.0",
"targets": [
{
"datasource": { "type": "loki", "uid": "P8E80F9AEF21F6940" },
"editorMode": "code",
"expr": "{job=\"kubernetes-audit\"} | json | responseStatus_code >= 403",
"legendFormat": "",
"queryType": "range",
"refId": "A"
}
],
"title": "Denied Requests (403+)",
"type": "table"
}
],
"preload": false,
"refresh": "30s",
"schemaVersion": 42,
"tags": ["kubernetes", "audit", "security"],
"templating": {
"list": []
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Kubernetes Audit Logs",
"uid": "k8s-audit",
"version": 1
}

View file

@ -1,288 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "Logs collected from Kubernetes, stored in Loki",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 15141,
"graphTooltip": 0,
"id": 25,
"links": [],
"panels": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 24,
"x": 0,
"y": 0
},
"id": 4,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": false
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "code",
"expr": "sum(count_over_time({namespace=~\"$namespace\", container =~\"$container\"} |= \"$query\" [$__interval]))",
"instant": false,
"legendFormat": "Log count",
"queryType": "range",
"range": true,
"refId": "A"
}
],
"type": "timeseries"
},
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"description": "Logs from services running in Kubernetes",
"gridPos": {
"h": 25,
"w": 24,
"x": 0,
"y": 4
},
"id": 2,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"editorMode": "code",
"expr": "{namespace=~\"$namespace\", container =~\"$container\"} |= \"$query\"",
"queryType": "range",
"refId": "A"
}
],
"type": "logs"
}
],
"refresh": "5s",
"schemaVersion": 39,
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"description": "String to search for",
"hide": 0,
"label": "Search Query",
"name": "query",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
},
{
"allValue": ".+",
"current": {
"selected": true,
"text": [
"dbaas"
],
"value": [
"dbaas"
]
},
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"definition": "label_values(namespace)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "namespace",
"options": [],
"query": "label_values(namespace)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"allValue": ".+",
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"definition": "label_values(stream)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "stream",
"options": [],
"query": "label_values(stream)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"allValue": ".+",
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "loki",
"uid": "P8E80F9AEF21F6940"
},
"definition": "label_values(container)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "container",
"options": [],
"query": "label_values(container)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Loki Kubernetes Logs",
"uid": "o6-BGgnnk",
"version": 2,
"weekStart": ""
}

File diff suppressed because it is too large

View file

@ -1,816 +0,0 @@
{
"annotations": {
"list": [
{
"$$hashKey": "object:192",
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "This dashboard is to display the metrics from DCGM Exporter on a Kubernetes (1.13+) cluster",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 0,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "celsius"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 18,
"x": 0,
"y": 0
},
"id": 12,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "nvidia_tesla_t4_DCGM_FI_DEV_GPU_TEMP",
"instant": false,
"interval": "",
"legendFormat": "GPU 0",
"refId": "A"
}
],
"title": "GPU Temperature",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "#EAB839",
"value": 70
},
{
"color": "red",
"value": 80
}
]
},
"unit": "celsius"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 18,
"y": 0
},
"id": 14,
"options": {
"minVizHeight": 75,
"minVizWidth": 75,
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sizing": "auto"
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "nvidia_tesla_t4_DCGM_FI_DEV_GPU_TEMP",
"interval": "",
"legendFormat": "",
"range": true,
"refId": "A"
}
],
"title": "GPU Current Temp",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "watt"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 18,
"x": 0,
"y": 8
},
"id": 10,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "nvidia_tesla_t4_DCGM_FI_DEV_POWER_USAGE",
"interval": "",
"legendFormat": "GPU {{gpu}}",
"range": true,
"refId": "A"
}
],
"title": "GPU Power Usage",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"max": 2400,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "#EAB839",
"value": 1800
},
{
"color": "red",
"value": 2200
}
]
},
"unit": "watt"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 18,
"y": 8
},
"id": 16,
"options": {
"minVizHeight": 75,
"minVizWidth": 75,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"sum"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sizing": "auto"
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"exemplar": false,
"expr": "sum(nvidia_tesla_t4_DCGM_FI_DEV_POWER_USAGE)",
"instant": true,
"interval": "",
"legendFormat": "",
"range": false,
"refId": "A"
}
],
"title": "GPU Power Total",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 16
},
"id": 6,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "nvidia_tesla_t4_DCGM_FI_DEV_GPU_UTIL",
"interval": "",
"legendFormat": "GPU {{gpu}}",
"range": true,
"refId": "A"
}
],
"title": "GPU Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "decmbytes"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 16
},
"id": 18,
"options": {
"legend": {
"calcs": [
"mean",
"max"
],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "nvidia_tesla_t4_DCGM_FI_DEV_FB_USED",
"interval": "",
"legendFormat": "GPU {{gpu}}",
"range": true,
"refId": "A"
}
],
"title": "GPU Framebuffer Mem Used",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 2,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "hertz"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 24
},
"id": 2,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull",
"max"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "multi",
"sort": "none"
}
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "nvidia_tesla_t4_DCGM_FI_DEV_SM_CLOCK* 1000000",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "GPU {{gpu}}",
"range": true,
"refId": "A"
}
],
"title": "GPU SM Clocks",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 24
},
"id": 19,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.3.1",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"editorMode": "code",
"expr": "sum by (namespace) (gpu_pod_memory_used_bytes)",
"instant": false,
"legendFormat": "{{namespace}}",
"range": true,
"refId": "A"
}
],
"title": "GPU Memory per Application",
"type": "timeseries"
}
],
"preload": false,
"refresh": "auto",
"schemaVersion": 42,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-12h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "NVIDIA DCGM Exporter Dashboard",
"uid": "Oxed_c6Wz",
"version": 9
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,488 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": { "type": "datasource", "uid": "grafana" },
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Technitium DNS query logs from MySQL",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": null,
"links": [],
"panels": [
{
"title": "Total Queries",
"type": "stat",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 4, "w": 4, "x": 0, "y": 0 },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"thresholds": {
"steps": [
{ "color": "green", "value": null }
]
}
},
"overrides": []
},
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "auto",
"textMode": "auto",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
},
"targets": [
{
"rawSql": "SELECT COUNT(*) as total_queries FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Cached %",
"type": "stat",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 4, "w": 4, "x": 4, "y": 0 },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"unit": "percentunit",
"thresholds": {
"steps": [
{ "color": "red", "value": null },
{ "color": "yellow", "value": 0.3 },
{ "color": "green", "value": 0.5 }
]
}
},
"overrides": []
},
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "auto",
"textMode": "auto",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN response_type = 3 THEN 1 ELSE 0 END) / COUNT(*) as cached_pct FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Blocked %",
"type": "stat",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 4, "w": 4, "x": 8, "y": 0 },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"unit": "percentunit",
"thresholds": {
"steps": [
{ "color": "green", "value": null },
{ "color": "yellow", "value": 0.1 },
{ "color": "red", "value": 0.3 }
]
}
},
"overrides": []
},
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "auto",
"textMode": "auto",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN response_type = 4 THEN 1 ELSE 0 END) / COUNT(*) as blocked_pct FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "NxDomain %",
"type": "stat",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 4, "w": 4, "x": 12, "y": 0 },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"unit": "percentunit",
"thresholds": {
"steps": [
{ "color": "green", "value": null },
{ "color": "yellow", "value": 0.2 },
{ "color": "red", "value": 0.5 }
]
}
},
"overrides": []
},
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "auto",
"textMode": "auto",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN rcode = 3 THEN 1 ELSE 0 END) / COUNT(*) as nxdomain_pct FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Avg Response Time",
"type": "stat",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 4, "w": 4, "x": 16, "y": 0 },
"fieldConfig": {
"defaults": {
"color": { "mode": "thresholds" },
"unit": "ms",
"thresholds": {
"steps": [
{ "color": "green", "value": null },
{ "color": "yellow", "value": 50 },
{ "color": "red", "value": 200 }
]
}
},
"overrides": []
},
"options": {
"colorMode": "value",
"graphMode": "none",
"justifyMode": "auto",
"textMode": "auto",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
},
"targets": [
{
"rawSql": "SELECT AVG(response_rtt) as avg_rtt_ms FROM dns_logs WHERE $__timeFilter(timestamp) AND response_rtt IS NOT NULL",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Queries by Protocol",
"type": "stat",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 4, "w": 4, "x": 20, "y": 0 },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" }
},
"overrides": []
},
"options": {
"colorMode": "background",
"graphMode": "none",
"justifyMode": "auto",
"textMode": "auto",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": true }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN protocol = 0 THEN 1 ELSE 0 END) as UDP, SUM(CASE WHEN protocol = 1 THEN 1 ELSE 0 END) as TCP, SUM(CASE WHEN protocol = 3 THEN 1 ELSE 0 END) as DoH, SUM(CASE WHEN protocol = 4 THEN 1 ELSE 0 END) as DoT FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Queries Over Time",
"type": "timeseries",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 4 },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 50,
"gradientMode": "none",
"hideFrom": { "legend": false, "tooltip": false, "viz": false },
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": { "type": "linear" },
"showPoints": "never",
"spanNulls": false,
"stacking": { "group": "A", "mode": "normal" }
}
},
"overrides": []
},
"options": {
"legend": { "calcs": ["sum"], "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"rawSql": "SELECT $__timeGroup(timestamp, $__interval) as time, SUM(CASE WHEN response_type = 1 THEN 1 ELSE 0 END) as Authoritative, SUM(CASE WHEN response_type = 2 THEN 1 ELSE 0 END) as Recursive, SUM(CASE WHEN response_type = 3 THEN 1 ELSE 0 END) as Cached, SUM(CASE WHEN response_type = 4 THEN 1 ELSE 0 END) as Blocked, SUM(CASE WHEN response_type = 5 THEN 1 ELSE 0 END) as Dropped FROM dns_logs WHERE $__timeFilter(timestamp) GROUP BY time ORDER BY time",
"format": "time_series",
"refId": "A"
}
]
},
{
"title": "Response Codes",
"type": "piechart",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 8, "w": 8, "x": 0, "y": 12 },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" }
},
"overrides": [
{ "matcher": { "id": "byName", "options": "NOERROR" }, "properties": [{ "id": "color", "value": { "fixedColor": "green", "mode": "fixed" } }] },
{ "matcher": { "id": "byName", "options": "NXDOMAIN" }, "properties": [{ "id": "color", "value": { "fixedColor": "yellow", "mode": "fixed" } }] },
{ "matcher": { "id": "byName", "options": "SERVFAIL" }, "properties": [{ "id": "color", "value": { "fixedColor": "red", "mode": "fixed" } }] },
{ "matcher": { "id": "byName", "options": "REFUSED" }, "properties": [{ "id": "color", "value": { "fixedColor": "orange", "mode": "fixed" } }] }
]
},
"options": {
"legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] },
"pieType": "donut",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": true },
"tooltip": { "mode": "single" }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN rcode = 0 THEN 1 ELSE 0 END) as NOERROR, SUM(CASE WHEN rcode = 2 THEN 1 ELSE 0 END) as SERVFAIL, SUM(CASE WHEN rcode = 3 THEN 1 ELSE 0 END) as NXDOMAIN, SUM(CASE WHEN rcode = 5 THEN 1 ELSE 0 END) as REFUSED, SUM(CASE WHEN rcode NOT IN (0,2,3,5) THEN 1 ELSE 0 END) as Other FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Response Types",
"type": "piechart",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 8, "w": 8, "x": 8, "y": 12 },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" }
},
"overrides": [
{ "matcher": { "id": "byName", "options": "Cached" }, "properties": [{ "id": "color", "value": { "fixedColor": "green", "mode": "fixed" } }] },
{ "matcher": { "id": "byName", "options": "Blocked" }, "properties": [{ "id": "color", "value": { "fixedColor": "red", "mode": "fixed" } }] },
{ "matcher": { "id": "byName", "options": "Recursive" }, "properties": [{ "id": "color", "value": { "fixedColor": "blue", "mode": "fixed" } }] },
{ "matcher": { "id": "byName", "options": "Authoritative" }, "properties": [{ "id": "color", "value": { "fixedColor": "purple", "mode": "fixed" } }] }
]
},
"options": {
"legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] },
"pieType": "donut",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": true },
"tooltip": { "mode": "single" }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN response_type = 1 THEN 1 ELSE 0 END) as Authoritative, SUM(CASE WHEN response_type = 2 THEN 1 ELSE 0 END) as Recursive, SUM(CASE WHEN response_type = 3 THEN 1 ELSE 0 END) as Cached, SUM(CASE WHEN response_type = 4 THEN 1 ELSE 0 END) as Blocked, SUM(CASE WHEN response_type = 5 THEN 1 ELSE 0 END) as Dropped FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Query Types",
"type": "piechart",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 8, "w": 8, "x": 16, "y": 12 },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" }
},
"overrides": []
},
"options": {
"legend": { "displayMode": "table", "placement": "right", "values": ["value", "percent"] },
"pieType": "donut",
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": true },
"tooltip": { "mode": "single" }
},
"targets": [
{
"rawSql": "SELECT SUM(CASE WHEN qtype = 1 THEN 1 ELSE 0 END) as A, SUM(CASE WHEN qtype = 28 THEN 1 ELSE 0 END) as AAAA, SUM(CASE WHEN qtype = 5 THEN 1 ELSE 0 END) as CNAME, SUM(CASE WHEN qtype = 15 THEN 1 ELSE 0 END) as MX, SUM(CASE WHEN qtype = 16 THEN 1 ELSE 0 END) as TXT, SUM(CASE WHEN qtype = 33 THEN 1 ELSE 0 END) as SRV, SUM(CASE WHEN qtype = 12 THEN 1 ELSE 0 END) as PTR, SUM(CASE WHEN qtype = 6 THEN 1 ELSE 0 END) as SOA, SUM(CASE WHEN qtype = 2 THEN 1 ELSE 0 END) as NS, SUM(CASE WHEN qtype = 65 THEN 1 ELSE 0 END) as HTTPS, SUM(CASE WHEN qtype NOT IN (1,2,5,6,12,15,16,28,33,65) THEN 1 ELSE 0 END) as Other FROM dns_logs WHERE $__timeFilter(timestamp)",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Top 20 Queried Domains",
"type": "table",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 10, "w": 12, "x": 0, "y": 20 },
"fieldConfig": {
"defaults": {
"custom": { "filterable": true }
},
"overrides": [
{ "matcher": { "id": "byName", "options": "count" }, "properties": [{ "id": "custom.width", "value": 100 }] }
]
},
"options": {
"showHeader": true,
"sortBy": [{ "desc": true, "displayName": "count" }]
},
"targets": [
{
"rawSql": "SELECT qname as domain, COUNT(*) as count FROM dns_logs WHERE $__timeFilter(timestamp) GROUP BY qname ORDER BY count DESC LIMIT 20",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Top 20 Clients",
"type": "table",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 10, "w": 12, "x": 12, "y": 20 },
"fieldConfig": {
"defaults": {
"custom": { "filterable": true }
},
"overrides": [
{ "matcher": { "id": "byName", "options": "count" }, "properties": [{ "id": "custom.width", "value": 100 }] }
]
},
"options": {
"showHeader": true,
"sortBy": [{ "desc": true, "displayName": "count" }]
},
"targets": [
{
"rawSql": "SELECT client_ip, COUNT(*) as count FROM dns_logs WHERE $__timeFilter(timestamp) GROUP BY client_ip ORDER BY count DESC LIMIT 20",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Average Response Time Over Time",
"type": "timeseries",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 30 },
"fieldConfig": {
"defaults": {
"color": { "mode": "palette-classic" },
"unit": "ms",
"custom": {
"axisBorderShow": false,
"axisLabel": "Response Time (ms)",
"axisPlacement": "auto",
"drawStyle": "line",
"fillOpacity": 20,
"gradientMode": "none",
"lineWidth": 2,
"pointSize": 5,
"showPoints": "never",
"spanNulls": true
}
},
"overrides": []
},
"options": {
"legend": { "calcs": ["mean", "max"], "displayMode": "list", "placement": "bottom" },
"tooltip": { "mode": "multi", "sort": "desc" }
},
"targets": [
{
"rawSql": "SELECT $__timeGroup(timestamp, $__interval) as time, AVG(response_rtt) as avg_rtt, MAX(response_rtt) as max_rtt FROM dns_logs WHERE $__timeFilter(timestamp) AND response_rtt IS NOT NULL GROUP BY time ORDER BY time",
"format": "time_series",
"refId": "A"
}
]
},
{
"title": "Top 20 NxDomain Domains",
"type": "table",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 10, "w": 12, "x": 0, "y": 38 },
"fieldConfig": {
"defaults": {
"custom": { "filterable": true }
},
"overrides": [
{ "matcher": { "id": "byName", "options": "count" }, "properties": [{ "id": "custom.width", "value": 100 }] }
]
},
"options": {
"showHeader": true,
"sortBy": [{ "desc": true, "displayName": "count" }]
},
"targets": [
{
"rawSql": "SELECT qname as domain, COUNT(*) as count FROM dns_logs WHERE $__timeFilter(timestamp) AND rcode = 3 GROUP BY qname ORDER BY count DESC LIMIT 20",
"format": "table",
"refId": "A"
}
]
},
{
"title": "Top 20 Blocked Domains",
"type": "table",
"datasource": { "type": "mysql", "uid": "technitium-mysql" },
"gridPos": { "h": 10, "w": 12, "x": 12, "y": 38 },
"fieldConfig": {
"defaults": {
"custom": { "filterable": true }
},
"overrides": [
{ "matcher": { "id": "byName", "options": "count" }, "properties": [{ "id": "custom.width", "value": 100 }] }
]
},
"options": {
"showHeader": true,
"sortBy": [{ "desc": true, "displayName": "count" }]
},
"targets": [
{
"rawSql": "SELECT qname as domain, COUNT(*) as count FROM dns_logs WHERE $__timeFilter(timestamp) AND response_type = 4 GROUP BY qname ORDER BY count DESC LIMIT 20",
"format": "table",
"refId": "A"
}
]
}
],
"refresh": "5m",
"schemaVersion": 39,
"tags": ["dns", "technitium", "mysql"],
"templating": { "list": [] },
"time": { "from": "now-24h", "to": "now" },
"timepicker": {},
"timezone": "",
"title": "Technitium DNS",
"uid": "technitium-dns",
"version": 1
}


@ -1,303 +0,0 @@
# HELP snmpEnableAuthenTraps Indicates whether the SNMP entity is permitted to generate authenticationFailure traps - 1.3.6.1.2.1.11.30
# TYPE snmpEnableAuthenTraps gauge
snmpEnableAuthenTraps 2
# HELP snmpInASNParseErrs The total number of ASN.1 or BER errors encountered by the SNMP entity when decoding received SNMP messages. - 1.3.6.1.2.1.11.6
# TYPE snmpInASNParseErrs counter
snmpInASNParseErrs 0
# HELP snmpInBadCommunityNames The total number of community-based SNMP messages (for example, SNMPv1) delivered to the SNMP entity which used an SNMP community name not known to said entity - 1.3.6.1.2.1.11.4
# TYPE snmpInBadCommunityNames counter
snmpInBadCommunityNames 184
# HELP snmpInBadCommunityUses The total number of community-based SNMP messages (for example, SNMPv1) delivered to the SNMP entity which represented an SNMP operation that was not allowed for the SNMP community named in the message - 1.3.6.1.2.1.11.5
# TYPE snmpInBadCommunityUses counter
snmpInBadCommunityUses 0
# HELP snmpInBadValues The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error-status field was `badValue'. - 1.3.6.1.2.1.11.10
# TYPE snmpInBadValues counter
snmpInBadValues 0
# HELP snmpInBadVersions The total number of SNMP messages which were delivered to the SNMP entity and were for an unsupported SNMP version. - 1.3.6.1.2.1.11.3
# TYPE snmpInBadVersions counter
snmpInBadVersions 0
# HELP snmpInGenErrs The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error-status field was `genErr'. - 1.3.6.1.2.1.11.12
# TYPE snmpInGenErrs counter
snmpInGenErrs 0
# HELP snmpInGetNexts The total number of SNMP Get-Next PDUs which have been accepted and processed by the SNMP protocol entity. - 1.3.6.1.2.1.11.16
# TYPE snmpInGetNexts counter
snmpInGetNexts 2940
# HELP snmpInGetRequests The total number of SNMP Get-Request PDUs which have been accepted and processed by the SNMP protocol entity. - 1.3.6.1.2.1.11.15
# TYPE snmpInGetRequests counter
snmpInGetRequests 9
# HELP snmpInGetResponses The total number of SNMP Get-Response PDUs which have been accepted and processed by the SNMP protocol entity. - 1.3.6.1.2.1.11.18
# TYPE snmpInGetResponses counter
snmpInGetResponses 0
# HELP snmpInNoSuchNames The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error-status field was `noSuchName'. - 1.3.6.1.2.1.11.9
# TYPE snmpInNoSuchNames counter
snmpInNoSuchNames 0
# HELP snmpInPkts The total number of messages delivered to the SNMP entity from the transport service. - 1.3.6.1.2.1.11.1
# TYPE snmpInPkts counter
snmpInPkts 5928
# HELP snmpInReadOnlys The total number valid SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error-status field was `readOnly' - 1.3.6.1.2.1.11.11
# TYPE snmpInReadOnlys counter
snmpInReadOnlys 0
# HELP snmpInSetRequests The total number of SNMP Set-Request PDUs which have been accepted and processed by the SNMP protocol entity. - 1.3.6.1.2.1.11.17
# TYPE snmpInSetRequests counter
snmpInSetRequests 0
# HELP snmpInTooBigs The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error-status field was `tooBig'. - 1.3.6.1.2.1.11.8
# TYPE snmpInTooBigs counter
snmpInTooBigs 0
# HELP snmpInTotalReqVars The total number of MIB objects which have been retrieved successfully by the SNMP protocol entity as the result of receiving valid SNMP Get-Request and Get-Next PDUs. - 1.3.6.1.2.1.11.13
# TYPE snmpInTotalReqVars counter
snmpInTotalReqVars 72699
# HELP snmpInTotalSetVars The total number of MIB objects which have been altered successfully by the SNMP protocol entity as the result of receiving valid SNMP Set-Request PDUs. - 1.3.6.1.2.1.11.14
# TYPE snmpInTotalSetVars counter
snmpInTotalSetVars 0
# HELP snmpInTraps The total number of SNMP Trap PDUs which have been accepted and processed by the SNMP protocol entity. - 1.3.6.1.2.1.11.19
# TYPE snmpInTraps counter
snmpInTraps 0
# HELP snmpOutBadValues The total number of SNMP PDUs which were generated by the SNMP protocol entity and for which the value of the error-status field was `badValue'. - 1.3.6.1.2.1.11.22
# TYPE snmpOutBadValues counter
snmpOutBadValues 0
# HELP snmpOutGenErrs The total number of SNMP PDUs which were generated by the SNMP protocol entity and for which the value of the error-status field was `genErr'. - 1.3.6.1.2.1.11.24
# TYPE snmpOutGenErrs counter
snmpOutGenErrs 0
# HELP snmpOutGetNexts The total number of SNMP Get-Next PDUs which have been generated by the SNMP protocol entity. - 1.3.6.1.2.1.11.26
# TYPE snmpOutGetNexts counter
snmpOutGetNexts 0
# HELP snmpOutGetRequests The total number of SNMP Get-Request PDUs which have been generated by the SNMP protocol entity. - 1.3.6.1.2.1.11.25
# TYPE snmpOutGetRequests counter
snmpOutGetRequests 0
# HELP snmpOutGetResponses The total number of SNMP Get-Response PDUs which have been generated by the SNMP protocol entity. - 1.3.6.1.2.1.11.28
# TYPE snmpOutGetResponses counter
snmpOutGetResponses 5740
# HELP snmpOutNoSuchNames The total number of SNMP PDUs which were generated by the SNMP protocol entity and for which the value of the error-status was `noSuchName'. - 1.3.6.1.2.1.11.21
# TYPE snmpOutNoSuchNames counter
snmpOutNoSuchNames 0
# HELP snmpOutPkts The total number of SNMP Messages which were passed from the SNMP protocol entity to the transport service. - 1.3.6.1.2.1.11.2
# TYPE snmpOutPkts counter
snmpOutPkts 5739
# HELP snmpOutSetRequests The total number of SNMP Set-Request PDUs which have been generated by the SNMP protocol entity. - 1.3.6.1.2.1.11.27
# TYPE snmpOutSetRequests counter
snmpOutSetRequests 0
# HELP snmpOutTooBigs The total number of SNMP PDUs which were generated by the SNMP protocol entity and for which the value of the error-status field was `tooBig.' - 1.3.6.1.2.1.11.20
# TYPE snmpOutTooBigs counter
snmpOutTooBigs 0
# HELP snmpOutTraps The total number of SNMP Trap PDUs which have been generated by the SNMP protocol entity. - 1.3.6.1.2.1.11.29
# TYPE snmpOutTraps counter
snmpOutTraps 0
# HELP snmpProxyDrops The total number of Confirmed Class PDUs (such as GetRequest-PDUs, GetNextRequest-PDUs, GetBulkRequest-PDUs, SetRequest-PDUs, and InformRequest-PDUs) delivered to the SNMP entity which were silently dropped because the transmission of the (possibly translated) message to a proxy target failed in a manner (other than a time-out) such that no Response Class PDU (such as a Response-PDU) could be returned. - 1.3.6.1.2.1.11.32
# TYPE snmpProxyDrops counter
snmpProxyDrops 0
# HELP snmpSilentDrops The total number of Confirmed Class PDUs (such as GetRequest-PDUs, GetNextRequest-PDUs, GetBulkRequest-PDUs, SetRequest-PDUs, and InformRequest-PDUs) delivered to the SNMP entity which were silently dropped because the size of a reply containing an alternate Response Class PDU (such as a Response-PDU) with an empty variable-bindings field was greater than either a local constraint or the maximum message size associated with the originator of the request. - 1.3.6.1.2.1.11.31
# TYPE snmpSilentDrops counter
snmpSilentDrops 0
# HELP snmp_scrape_duration_seconds Total SNMP time scrape took (walk and processing).
# TYPE snmp_scrape_duration_seconds gauge
snmp_scrape_duration_seconds{module="huawei"} 0.39253882
# HELP snmp_scrape_packets_retried Packets retried for get, bulkget, and walk.
# TYPE snmp_scrape_packets_retried gauge
snmp_scrape_packets_retried{module="huawei"} 0
# HELP snmp_scrape_packets_sent Packets sent for get, bulkget, and walk; including retries.
# TYPE snmp_scrape_packets_sent gauge
snmp_scrape_packets_sent{module="huawei"} 6
# HELP snmp_scrape_pdus_returned PDUs returned from get, bulkget, and walk.
# TYPE snmp_scrape_pdus_returned gauge
snmp_scrape_pdus_returned{module="huawei"} 104
# HELP snmp_scrape_walk_duration_seconds Time SNMP walk/bulkwalk took.
# TYPE snmp_scrape_walk_duration_seconds gauge
snmp_scrape_walk_duration_seconds{module="huawei"} 0.391760524
# HELP sysContact The textual identification of the contact person for this managed node, together with information on how to contact this person - 1.3.6.1.2.1.1.4
# TYPE sysContact gauge
sysContact{sysContact="Not Configure System Contact"} 1
# HELP sysDescr A textual description of the entity - 1.3.6.1.2.1.1.1
# TYPE sysDescr gauge
sysDescr{sysDescr="Linux GSE200M 2.6.27-SPEAr310 #80 Fri Jan 13 11:22:09 CST 2017 armv5tejl"} 1
# HELP sysLocation The physical location of this node (e.g., 'telephone closet, 3rd floor') - 1.3.6.1.2.1.1.6
# TYPE sysLocation gauge
sysLocation{sysLocation="Garage G03"} 1
# HELP sysName An administratively-assigned name for this managed node - 1.3.6.1.2.1.1.5
# TYPE sysName gauge
sysName{sysName="ups2000"} 1
# HELP sysORDescr A textual description of the capabilities identified by the corresponding instance of sysORID. - 1.3.6.1.2.1.1.9.1.3
# TYPE sysORDescr gauge
sysORDescr{sysORDescr="The MIB for Message Processing and Dispatching.",sysORIndex="3"} 1
sysORDescr{sysORDescr="The MIB module for SNMPv2 entities",sysORIndex="1"} 1
sysORDescr{sysORDescr="The SNMP Management Architecture MIB.",sysORIndex="5"} 1
sysORDescr{sysORDescr="The management information definitions for the SNMP User-based Security Model.",sysORIndex="4"} 1
sysORDescr{sysORDescr="View-based Access Control Model for SNMP.",sysORIndex="2"} 1
# HELP sysORID An authoritative identification of a capabilities statement with respect to various MIB modules supported by the local SNMP application acting as a command responder. - 1.3.6.1.2.1.1.9.1.2
# TYPE sysORID gauge
sysORID{sysORID="1.3.6.1.6.3.1",sysORIndex="1"} 1
sysORID{sysORID="1.3.6.1.6.3.10.3.1.1",sysORIndex="5"} 1
sysORID{sysORID="1.3.6.1.6.3.11.3.1.1",sysORIndex="3"} 1
sysORID{sysORID="1.3.6.1.6.3.15.2.1.1",sysORIndex="4"} 1
sysORID{sysORID="1.3.6.1.6.3.16.2.2.1",sysORIndex="2"} 1
# HELP sysORLastChange The value of sysUpTime at the time of the most recent change in state or value of any instance of sysORID. - 1.3.6.1.2.1.1.8
# TYPE sysORLastChange gauge
sysORLastChange 8
# HELP sysORUpTime The value of sysUpTime at the time this conceptual row was last instantiated. - 1.3.6.1.2.1.1.9.1.4
# TYPE sysORUpTime gauge
sysORUpTime{sysORIndex="1"} 7
sysORUpTime{sysORIndex="2"} 8
sysORUpTime{sysORIndex="3"} 8
sysORUpTime{sysORIndex="4"} 8
sysORUpTime{sysORIndex="5"} 8
# HELP sysObjectID The vendor's authoritative identification of the network management subsystem contained in the entity - 1.3.6.1.2.1.1.2
# TYPE sysObjectID gauge
sysObjectID{sysObjectID="1.3.6.1.4.1.8072.3.2.10"} 1
# HELP sysUpTime The time (in hundredths of a second) since the network management portion of the system was last re-initialized. - 1.3.6.1.2.1.1.3
# TYPE sysUpTime gauge
sysUpTime 5.3264032e+07
# HELP upsAlarmsPresent The present number of active alarm conditions. - 1.3.6.1.2.1.33.1.6.1
# TYPE upsAlarmsPresent gauge
upsAlarmsPresent 0
# HELP upsAutoRestart Setting this object to 'on' will cause the UPS system to restart after a shutdown if the shutdown occurred during a power loss as a result of either a upsShutdownAfterDelay or an internal battery depleted condition - 1.3.6.1.2.1.33.1.8.5
# TYPE upsAutoRestart gauge
upsAutoRestart 0
# HELP upsBatteryCurrent The present battery current. - 1.3.6.1.2.1.33.1.2.6
# TYPE upsBatteryCurrent gauge
upsBatteryCurrent 2.147483647e+09
# HELP upsBatteryStatus The indication of the capacity remaining in the UPS system's batteries - 1.3.6.1.2.1.33.1.2.1
# TYPE upsBatteryStatus gauge
upsBatteryStatus 2
# HELP upsBatteryTemperature The ambient temperature at or near the UPS Battery casing. - 1.3.6.1.2.1.33.1.2.7
# TYPE upsBatteryTemperature gauge
upsBatteryTemperature 2.147483647e+09
# HELP upsBatteryVoltage The magnitude of the present battery voltage. - 1.3.6.1.2.1.33.1.2.5
# TYPE upsBatteryVoltage gauge
upsBatteryVoltage 821
# HELP upsBypassFrequency The present bypass frequency. - 1.3.6.1.2.1.33.1.5.1
# TYPE upsBypassFrequency gauge
upsBypassFrequency 500
# HELP upsBypassLineIndex The bypass line identifier. - 1.3.6.1.2.1.33.1.5.3.1.1
# TYPE upsBypassLineIndex gauge
upsBypassLineIndex{upsBypassLineIndex="1"} 1
# HELP upsBypassNumLines The number of bypass lines utilized in this device - 1.3.6.1.2.1.33.1.5.2
# TYPE upsBypassNumLines gauge
upsBypassNumLines 1
# HELP upsBypassVoltage The present bypass voltage. - 1.3.6.1.2.1.33.1.5.3.1.2
# TYPE upsBypassVoltage gauge
upsBypassVoltage{upsBypassLineIndex="1"} 220
# HELP upsConfigAudibleStatus The requested state of the audible alarm - 1.3.6.1.2.1.33.1.9.8
# TYPE upsConfigAudibleStatus gauge
upsConfigAudibleStatus 0
# HELP upsConfigHighVoltageTransferPoint The maximum line voltage allowed before the UPS system transfers to battery backup. - 1.3.6.1.2.1.33.1.9.10
# TYPE upsConfigHighVoltageTransferPoint gauge
upsConfigHighVoltageTransferPoint 0
# HELP upsConfigInputFreq The nominal input frequency - 1.3.6.1.2.1.33.1.9.2
# TYPE upsConfigInputFreq gauge
upsConfigInputFreq 0
# HELP upsConfigInputVoltage The magnitude of the nominal input voltage - 1.3.6.1.2.1.33.1.9.1
# TYPE upsConfigInputVoltage gauge
upsConfigInputVoltage 0
# HELP upsConfigLowBattTime The value of upsEstimatedMinutesRemaining at which a lowBattery condition is declared - 1.3.6.1.2.1.33.1.9.7
# TYPE upsConfigLowBattTime gauge
upsConfigLowBattTime 0
# HELP upsConfigLowVoltageTransferPoint The minimum input line voltage allowed before the UPS system transfers to battery backup. - 1.3.6.1.2.1.33.1.9.9
# TYPE upsConfigLowVoltageTransferPoint gauge
upsConfigLowVoltageTransferPoint 0
# HELP upsConfigOutputFreq The nominal output frequency - 1.3.6.1.2.1.33.1.9.4
# TYPE upsConfigOutputFreq gauge
upsConfigOutputFreq 0
# HELP upsConfigOutputPower The magnitude of the nominal true power rating. - 1.3.6.1.2.1.33.1.9.6
# TYPE upsConfigOutputPower gauge
upsConfigOutputPower 0
# HELP upsConfigOutputVA The magnitude of the nominal Volt-Amp rating. - 1.3.6.1.2.1.33.1.9.5
# TYPE upsConfigOutputVA gauge
upsConfigOutputVA 0
# HELP upsConfigOutputVoltage The magnitude of the nominal output voltage - 1.3.6.1.2.1.33.1.9.3
# TYPE upsConfigOutputVoltage gauge
upsConfigOutputVoltage 0
# HELP upsEstimatedChargeRemaining An estimate of the battery charge remaining expressed as a percent of full charge. - 1.3.6.1.2.1.33.1.2.4
# TYPE upsEstimatedChargeRemaining gauge
upsEstimatedChargeRemaining 91
# HELP upsEstimatedMinutesRemaining An estimate of the time to battery charge depletion under the present load conditions if the utility power is off and remains off, or if it were to be lost and remain off. - 1.3.6.1.2.1.33.1.2.3
# TYPE upsEstimatedMinutesRemaining gauge
upsEstimatedMinutesRemaining 34
# HELP upsIdentAgentSoftwareVersion The UPS agent software version - 1.3.6.1.2.1.33.1.1.4
# TYPE upsIdentAgentSoftwareVersion gauge
upsIdentAgentSoftwareVersion{upsIdentAgentSoftwareVersion="V200R001C31B016"} 1
# HELP upsIdentAttachedDevices A string identifying the devices attached to the output(s) of the UPS - 1.3.6.1.2.1.33.1.1.6
# TYPE upsIdentAttachedDevices gauge
upsIdentAttachedDevices{upsIdentAttachedDevices="None"} 1
# HELP upsIdentManufacturer The name of the UPS manufacturer. - 1.3.6.1.2.1.33.1.1.1
# TYPE upsIdentManufacturer gauge
upsIdentManufacturer{upsIdentManufacturer="HUAWEI"} 1
# HELP upsIdentModel The UPS Model designation. - 1.3.6.1.2.1.33.1.1.2
# TYPE upsIdentModel gauge
upsIdentModel{upsIdentModel="UPS2000 2kVA"} 1
# HELP upsIdentName A string identifying the UPS - 1.3.6.1.2.1.33.1.1.5
# TYPE upsIdentName gauge
upsIdentName{upsIdentName="ups2000"} 1
# HELP upsIdentUPSSoftwareVersion The UPS firmware/software version(s) - 1.3.6.1.2.1.33.1.1.3
# TYPE upsIdentUPSSoftwareVersion gauge
upsIdentUPSSoftwareVersion{upsIdentUPSSoftwareVersion="V2R1C1SPC40"} 1
# HELP upsInputFrequency The present input frequency. - 1.3.6.1.2.1.33.1.3.3.1.2
# TYPE upsInputFrequency gauge
upsInputFrequency{upsInputLineIndex="1"} 500
# HELP upsInputLineBads A count of the number of times the input entered an out-of-tolerance condition as defined by the manufacturer - 1.3.6.1.2.1.33.1.3.1
# TYPE upsInputLineBads counter
upsInputLineBads 0
# HELP upsInputLineIndex The input line identifier. - 1.3.6.1.2.1.33.1.3.3.1.1
# TYPE upsInputLineIndex gauge
upsInputLineIndex{upsInputLineIndex="1"} 1
# HELP upsInputNumLines The number of input lines utilized in this device - 1.3.6.1.2.1.33.1.3.2
# TYPE upsInputNumLines gauge
upsInputNumLines 1
# HELP upsInputVoltage The magnitude of the present input voltage. - 1.3.6.1.2.1.33.1.3.3.1.3
# TYPE upsInputVoltage gauge
upsInputVoltage{upsInputLineIndex="1"} 218
# HELP upsOutputCurrent The present output current. - 1.3.6.1.2.1.33.1.4.4.1.3
# TYPE upsOutputCurrent gauge
upsOutputCurrent{upsOutputLineIndex="1"} 56
# HELP upsOutputFrequency The present output frequency. - 1.3.6.1.2.1.33.1.4.2
# TYPE upsOutputFrequency gauge
upsOutputFrequency 500
# HELP upsOutputLineIndex The output line identifier. - 1.3.6.1.2.1.33.1.4.4.1.1
# TYPE upsOutputLineIndex gauge
upsOutputLineIndex{upsOutputLineIndex="1"} 1
# HELP upsOutputNumLines The number of output lines utilized in this device - 1.3.6.1.2.1.33.1.4.3
# TYPE upsOutputNumLines gauge
upsOutputNumLines 1
# HELP upsOutputPercentLoad The percentage of the UPS power capacity presently being used on this output line, i.e., the greater of the percent load of true power capacity and the percent load of VA. - 1.3.6.1.2.1.33.1.4.4.1.5
# TYPE upsOutputPercentLoad gauge
upsOutputPercentLoad{upsOutputLineIndex="1"} 66
# HELP upsOutputPower The present output true power. - 1.3.6.1.2.1.33.1.4.4.1.4
# TYPE upsOutputPower gauge
upsOutputPower{upsOutputLineIndex="1"} 1
# HELP upsOutputSource The present source of output power - 1.3.6.1.2.1.33.1.4.1
# TYPE upsOutputSource gauge
upsOutputSource 3
# HELP upsOutputVoltage The present output voltage. - 1.3.6.1.2.1.33.1.4.4.1.2
# TYPE upsOutputVoltage gauge
upsOutputVoltage{upsOutputLineIndex="1"} 230
# HELP upsRebootWithDuration Setting this object will immediately shutdown (i.e., turn off) either the UPS output or the UPS system (as determined by the value of upsShutdownType at the time of shutdown) for a period equal to the indicated number of seconds, after which time the output will be started, including starting the UPS, if necessary - 1.3.6.1.2.1.33.1.8.4
# TYPE upsRebootWithDuration gauge
upsRebootWithDuration 0
# HELP upsSecondsOnBattery If the unit is on battery power, the elapsed time since the UPS last switched to battery power, or the time since the network management subsystem was last restarted, whichever is less - 1.3.6.1.2.1.33.1.2.2
# TYPE upsSecondsOnBattery gauge
upsSecondsOnBattery 0
# HELP upsShutdownAfterDelay Setting this object will shutdown (i.e., turn off) either the UPS output or the UPS system (as determined by the value of upsShutdownType at the time of shutdown) after the indicated number of seconds, or less if the UPS batteries become depleted - 1.3.6.1.2.1.33.1.8.2
# TYPE upsShutdownAfterDelay gauge
upsShutdownAfterDelay 0
# HELP upsShutdownType This object determines the nature of the action to be taken at the time when the countdown of the upsShutdownAfterDelay and upsRebootWithDuration objects reaches zero - 1.3.6.1.2.1.33.1.8.1
# TYPE upsShutdownType gauge
upsShutdownType 0
# HELP upsStartupAfterDelay Setting this object will start the output after the indicated number of seconds, including starting the UPS, if necessary - 1.3.6.1.2.1.33.1.8.3
# TYPE upsStartupAfterDelay gauge
upsStartupAfterDelay 0
# HELP upsTestElapsedTime The amount of time, in TimeTicks, since the test in progress was initiated, or, if no test is in progress, the previous test took to complete - 1.3.6.1.2.1.33.1.7.6
# TYPE upsTestElapsedTime gauge
upsTestElapsedTime 0
# HELP upsTestId The test is named by an OBJECT IDENTIFIER which allows a standard mechanism for the initiation of tests, including the well known tests identified in this document as well as those introduced by a particular implementation, i.e., as documented in the private enterprise MIB definition for the device - 1.3.6.1.2.1.33.1.7.1
# TYPE upsTestId gauge
upsTestId{upsTestId="0"} 1
# HELP upsTestResultsDetail Additional information about upsTestResultsSummary - 1.3.6.1.2.1.33.1.7.4
# TYPE upsTestResultsDetail gauge
upsTestResultsDetail{upsTestResultsDetail="0"} 1
# HELP upsTestResultsSummary The results of the current or last UPS diagnostics test performed - 1.3.6.1.2.1.33.1.7.3
# TYPE upsTestResultsSummary gauge
upsTestResultsSummary 0
# HELP upsTestSpinLock A spin lock on the test subsystem - 1.3.6.1.2.1.33.1.7.2
# TYPE upsTestSpinLock gauge
upsTestSpinLock 0
# HELP upsTestStartTime The value of sysUpTime at the time the test in progress was initiated, or, if no test is in progress, the time the previous test was initiated - 1.3.6.1.2.1.33.1.7.5
# TYPE upsTestStartTime gauge
upsTestStartTime 0

File diff suppressed because it is too large


@ -1,97 +0,0 @@
resource "kubernetes_deployment" "goflow2" {
metadata {
name = "goflow2"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
app = "goflow2"
tier = var.tier
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "goflow2"
}
}
template {
metadata {
labels = {
app = "goflow2"
}
}
spec {
container {
name = "goflow2"
image = "netsampler/goflow2:v2.2.1"
args = ["-listen", "netflow://:2055"]
port {
name = "netflow"
container_port = 2055
protocol = "UDP"
}
port {
name = "metrics"
container_port = 8080
protocol = "TCP"
}
resources {
requests = {
cpu = "50m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
}
}
}
}
}
}
}
resource "kubernetes_service" "goflow2" {
metadata {
name = "goflow2"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
app = "goflow2"
}
}
spec {
selector = {
app = "goflow2"
}
port {
name = "metrics"
port = 8080
target_port = 8080
protocol = "TCP"
}
}
}
resource "kubernetes_service" "goflow2-netflow" {
metadata {
name = "goflow2-netflow"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
app = "goflow2"
}
}
spec {
type = "NodePort"
selector = {
app = "goflow2"
}
port {
name = "netflow"
port = 2055
target_port = 2055
protocol = "UDP"
node_port = 32055
}
}
}
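
# Editorial sketch, not part of the original file: the "metrics" Service above exposes
# goflow2's Prometheus endpoint on port 8080, but nothing here marks it for scraping.
# If (assumption) this cluster's Prometheus uses annotation-based service discovery,
# the Service metadata could advertise itself roughly like this:
#
#   annotations = {
#     "prometheus.io/scrape" = "true"
#     "prometheus.io/path"   = "/metrics"
#     "prometheus.io/port"   = "8080"
#   }
#
# A static scrape_config pointing at the Service would work just as well; either way,
# the flow records themselves still arrive via the UDP NodePort 32055 listener.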


@ -1,132 +0,0 @@
# resource "kubernetes_persistent_volume" "prometheus_grafana_pv" {
# metadata {
# name = "grafana-pv"
# }
# spec {
# capacity = {
# "storage" = "2Gi"
# }
# access_modes = ["ReadWriteOnce"]
# persistent_volume_source {
# nfs {
# path = "/mnt/main/grafana"
# server = var.nfs_server
# }
# # iscsi {
# # target_portal = "iscsi.viktorbarzin.lan:3260"
# # iqn = "iqn.2020-12.lan.viktorbarzin:storage:monitoring:grafana"
# # lun = 0
# # fs_type = "ext4"
# # }
# }
# }
# }
resource "kubernetes_persistent_volume" "alertmanager_pv" {
metadata {
name = "alertmanager-pv"
}
spec {
capacity = {
"storage" = "2Gi"
}
access_modes = ["ReadWriteOnce"]
persistent_volume_source {
csi {
driver = "nfs.csi.k8s.io"
volume_handle = "alertmanager-pv"
volume_attributes = {
server = var.nfs_server
share = "/mnt/main/alertmanager"
}
}
}
mount_options = [
"soft",
"timeo=30",
"retrans=3",
"actimeo=5",
]
storage_class_name = "nfs-truenas"
}
}
# resource "kubernetes_persistent_volume_claim" "grafana_pvc" {
# metadata {
# name = "grafana-pvc"
# namespace = kubernetes_namespace.monitoring.metadata[0].name
# }
# spec {
# access_modes = ["ReadWriteOnce"]
# resources {
# requests = {
# "storage" = "2Gi"
# }
# }
# }
# }
# DB credentials from Vault database engine (rotated automatically)
# Provides GF_DATABASE_PASSWORD that auto-updates when password rotates
resource "kubernetes_manifest" "grafana_db_creds" {
manifest = {
apiVersion = "external-secrets.io/v1beta1"
kind = "ExternalSecret"
metadata = {
name = "grafana-db-creds"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec = {
refreshInterval = "15m"
secretStoreRef = {
name = "vault-database"
kind = "ClusterSecretStore"
}
target = {
name = "grafana-db-creds"
template = {
data = {
GF_DATABASE_PASSWORD = "{{ .password }}"
}
}
}
data = [{
secretKey = "password"
remoteRef = {
key = "static-creds/mysql-grafana"
property = "password"
}
}]
}
}
}
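
# Illustrative only (assumption, not part of the original file): once reconciled by
# external-secrets, the ExternalSecret above yields a plain Secret roughly equivalent to:
#
#   kind: Secret
#   metadata:
#     name: grafana-db-creds
#     namespace: <monitoring namespace>
#   stringData:
#     GF_DATABASE_PASSWORD: <current password from static-creds/mysql-grafana>
#
# The Grafana chart consumes it via envFromSecrets (see grafana_chart_values.yaml), and
# grafana.ini then reads the value with $__env{GF_DATABASE_PASSWORD}.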
resource "kubernetes_config_map" "grafana_dashboards" {
for_each = fileset("${path.module}/dashboards", "*.json")
metadata {
name = "grafana-dashboard-${replace(trimsuffix(each.value, ".json"), "_", "-")}"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
grafana_dashboard = "1"
}
}
data = {
(each.value) = file("${path.module}/dashboards/${each.value}")
}
}
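
# Worked example of the name mangling above (the file name is hypothetical): a dashboard
# shipped as dashboards/loki_kubernetes_logs.json becomes a ConfigMap named
#   grafana-dashboard-loki-kubernetes-logs
# and carries the grafana_dashboard = "1" label, which is what the chart's dashboards
# sidecar (enabled in grafana_chart_values.yaml) watches for.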
resource "helm_release" "grafana" {
namespace = kubernetes_namespace.monitoring.metadata[0].name
create_namespace = true
name = "grafana"
atomic = true
timeout = 600
repository = "https://grafana.github.io/helm-charts"
chart = "grafana"
values = [templatefile("${path.module}/grafana_chart_values.yaml", { grafana_admin_password = var.grafana_admin_password, mysql_host = var.mysql_host })]
depends_on = [kubernetes_manifest.grafana_db_creds]
}


@ -1,103 +0,0 @@
deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
replicas: 2
adminPassword: "${grafana_admin_password}"
resources:
requests:
cpu: 50m
memory: 512Mi
limits:
memory: 512Mi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/name: grafana
podAnnotations:
dependency.kyverno.io/wait-for: "mysql.dbaas:3306"
podDisruptionBudget:
maxUnavailable: 1
persistence:
enabled: false # using external mysql
existingClaim: "grafana-pvc"
ingress:
enabled: "true"
ingressClassName: "traefik"
annotations:
traefik.ingress.kubernetes.io/router.middlewares: "traefik-rate-limit@kubernetescrd,traefik-csp-headers@kubernetescrd,traefik-crowdsec@kubernetescrd"
traefik.ingress.kubernetes.io/router.entrypoints: "websecure"
gethomepage.dev/enabled: "true"
gethomepage.dev/name: "Grafana"
gethomepage.dev/description: "Dashboards & observability"
gethomepage.dev/icon: "grafana.png"
gethomepage.dev/group: "Core Platform"
gethomepage.dev/pod-selector: ""
gethomepage.dev/widget.type: "grafana"
gethomepage.dev/widget.url: "http://grafana.monitoring.svc.cluster.local"
gethomepage.dev/widget.username: "admin"
gethomepage.dev/widget.password: "${grafana_admin_password}"
tls:
- secretName: "tls-secret"
hosts:
- "grafana.viktorbarzin.me"
hosts:
- "grafana.viktorbarzin.me"
sidecar:
datasources:
enabled: "true"
dashboards:
enabled: true
label: "grafana_dashboard"
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
name: default
ordId: 1
# folder: ""
type: "file"
# disableDeletion: "false"
# editable: "true"
options:
path: "/var/lib/grafana/dashboards/default"
envFromSecrets:
- name: grafana-db-creds
optional: false
env:
GF_SERVER_ROOT_URL: https://grafana.viktorbarzin.me
grafana.ini:
database:
type: mysql
host: ${mysql_host}:3306
name: grafana
user: grafana
password: $__env{GF_DATABASE_PASSWORD}
ssl_mode: disable
auth.anonymous:
enabled: true
org_role: Viewer
# auth.google:
# enabled: true
analytics:
check_for_updates: "true"
grafana_net:
url: "https://grafana.net"
log:
mode: "console"
paths:
data: "/var/lib/grafana/data"
logs: "/var/log/grafana"
plugins: "/var/lib/grafana/plugins"
provisioning: "/etc/grafana/provisioning"
security:
allow_embedding: true # Allow to be iframed
# url: https://grafana.com/api/dashboards/11074/revisions/2/download
# datasources:
# - name: Prometheus
# url: http://prometheus-server


@ -1,131 +0,0 @@
resource "kubernetes_config_map" "redfish-config" {
metadata {
name = "redfish-exporter-config"
namespace = kubernetes_namespace.monitoring.metadata[0].name
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"config.yml" = <<-EOF
address: 0.0.0.0
port: 9610
hosts:
${var.idrac_host}:
username: ${var.idrac_username}
password: ${var.idrac_password}
default:
username: root
password: calvin
metrics:
all: true
# system: true
# sensors: true
# power: true
# sel: false # Disable SEL - often slow
# storage: true # Disable storage - slowest endpoint
# memory: true
# network: false # Disable network adapters
# firmware: false # Don't need this frequently
EOF
}
}
resource "kubernetes_deployment" "idrac-redfish" {
metadata {
name = "idrac-redfish-exporter"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
app = "idrac-redfish-exporter"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "idrac-redfish-exporter"
}
}
template {
metadata {
labels = {
app = "idrac-redfish-exporter"
}
}
spec {
priority_class_name = "tier-1-cluster"
container {
# https://github.com/mrlhansen/idrac_exporter?tab=readme-ov-file
# Patched v2.4.1 - restored missing idrac_power_supply_input_voltage metric
# See: https://github.com/mrlhansen/idrac_exporter/issues/176
image = "viktorbarzin/idrac-redfish-exporter:2.4.1-voltage-fix"
name = "redfish-exporter"
port {
container_port = 9610
}
volume_mount {
name = "redfish-exporter-config"
mount_path = "/etc/prometheus/idrac.yml"
sub_path = "config.yml"
}
}
volume {
name = "redfish-exporter-config"
config_map {
name = "redfish-exporter-config"
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "idrac-redfish-exporter" {
metadata {
name = "idrac-redfish-exporter"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
"app" = "idrac-redfish-exporter"
}
# annotations = {
# "prometheus.io/scrape" = "true"
# "prometheus.io/path" = "/metrics"
# "prometheus.io/port" = "9090"
# }
}
spec {
selector = {
"app" = "idrac-redfish-exporter"
}
port {
name = "http"
port = "9090"
target_port = "9610"
}
}
}
module "idrac-redfish-exporter-ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.monitoring.metadata[0].name
name = "idrac-redfish-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
port = 9090
}


@ -1,76 +0,0 @@
---
cluster:
name: default
destinations:
- name: loki
type: loki
url: http://loki-gateway.monitoring.svc.cluster.local/loki/api/v1/push
clusterEvents:
enabled: false
collector: alloy-logs
namespaces:
- dbaas
- immich
- authentik
- mailserver
- crowdsec
- descheduler
- monitoring
- ingress-nginx
- vaultwarden
nodeLogs:
enabled: false
podLogs:
enabled: true
gatherMethod: kubernetesApi
collector: alloy-logs
labelsToKeep:
[
"app_kubernetes_io_name",
"container",
"instance",
"job",
"level",
"namespace",
"service_name",
"service_namespace",
"deployment_environment",
"deployment_environment_name",
]
structuredMetadata:
pod: pod # Set structured metadata "pod" from label "pod"
namespaces:
- dbaas
- immich
- authentik
- mailserver
- crowdsec
- descheduler
- monitoring
- ingress-nginx
- vaultwarden
# Collectors
alloy-singleton:
enabled: false
alloy-metrics:
enabled: false
alloy-logs:
enabled: true
# Required when using the Kubernetes API to pod logs
alloy:
mounts:
varlog: false
clustering:
enabled: true
alloy-profiles:
enabled: false
alloy-receiver:
enabled: false


@ -1,220 +0,0 @@
variable "nfs_server" { type = string }
# LOKI DISABLED - Uncomment to re-enable centralized logging
# Disabled due to operational overhead vs benefit analysis after node2 incident
# All configuration preserved in loki.yaml for future re-enabling
/*
resource "helm_release" "loki" {
namespace = kubernetes_namespace.monitoring.metadata[0].name
create_namespace = true
name = "loki"
repository = "https://grafana.github.io/helm-charts"
chart = "loki"
values = [templatefile("${path.module}/loki.yaml", {})]
timeout = 600
depends_on = [kubernetes_config_map.loki_alert_rules]
}
*/
# ALLOY DISABLED - Log collection agents (depends on Loki)
# https://grafana.com/docs/alloy/latest/configure/kubernetes/
# Configuration preserved in alloy.yaml for future re-enabling
/*
resource "helm_release" "alloy" {
namespace = kubernetes_namespace.monitoring.metadata[0].name
create_namespace = true
name = "alloy"
repository = "https://grafana.github.io/helm-charts"
chart = "alloy"
values = [file("${path.module}/alloy.yaml")]
atomic = true
depends_on = [helm_release.loki]
}
*/
# SYSCTL INOTIFY DISABLED - Was specifically for Loki file watching requirements
# Can be re-enabled when Loki is restored
/*
resource "kubernetes_daemon_set_v1" "sysctl-inotify" {
metadata {
name = "sysctl-inotify"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
app = "sysctl-inotify"
}
}
spec {
selector {
match_labels = {
app = "sysctl-inotify"
}
}
template {
metadata {
labels = {
app = "sysctl-inotify"
}
}
spec {
init_container {
name = "sysctl"
image = "busybox:1.37"
command = [
"sh", "-c",
"sysctl -w fs.inotify.max_user_watches=1048576 && sysctl -w fs.inotify.max_user_instances=8192 && sysctl -w fs.inotify.max_queued_events=1048576"
]
security_context {
privileged = true
}
}
container {
name = "pause"
image = "registry.k8s.io/pause:3.10"
resources {
requests = {
cpu = "1m"
memory = "4Mi"
}
limits = {
cpu = "1m"
memory = "4Mi"
}
}
}
host_pid = true
toleration {
operator = "Exists"
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
*/
# resource "helm_release" "k8s-monitoring" {
# namespace = kubernetes_namespace.monitoring.metadata[0].name
# create_namespace = true
# name = "k8s-monitoring"
# repository = "https://grafana.github.io/helm-charts"
# chart = "k8s-monitoring"
# values = [templatefile("${path.module}/k8s-monitoring-values.yaml", {})]
# atomic = true
# }
# LOKI ALERT RULES DISABLED - Depend on Loki log queries
# These alert on kernel events from systemd journal logs via Loki
# Can be re-enabled when Loki is restored
/*
resource "kubernetes_config_map" "loki_alert_rules" {
metadata {
name = "loki-alert-rules"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
data = {
"rules.yaml" = yamlencode({
groups = [
{
name = "Node Health"
rules = [
{
alert = "KernelOOMKiller"
expr = "sum by (node) (count_over_time({job=\"node-journal\"} |~ \"(?i)Out of memory.*Killed process\" [5m])) > 0"
for = "0m"
labels = {
severity = "critical"
}
annotations = {
summary = "OOM killer active on {{ $labels.node }}"
}
},
{
alert = "KernelPanic"
expr = "sum by (node) (count_over_time({job=\"node-journal\"} |~ \"(?i)Kernel panic\" [5m])) > 0"
for = "0m"
labels = {
severity = "critical"
}
annotations = {
summary = "Kernel panic on {{ $labels.node }}"
}
},
{
alert = "KernelHungTask"
expr = "sum by (node) (count_over_time({job=\"node-journal\"} |~ \"blocked for more than\" [5m])) > 0"
for = "0m"
labels = {
severity = "warning"
}
annotations = {
summary = "Hung task detected on {{ $labels.node }}"
}
},
{
alert = "KernelSoftLockup"
expr = "sum by (node) (count_over_time({job=\"node-journal\"} |~ \"(?i)soft lockup\" [5m])) > 0"
for = "0m"
labels = {
severity = "critical"
}
annotations = {
summary = "Soft lockup on {{ $labels.node }}"
}
},
{
alert = "ContainerdDown"
expr = "sum by (node) (count_over_time({job=\"node-journal\", unit=\"containerd.service\"} |~ \"(?i)(dead|failed|deactivating)\" [5m])) > 0"
for = "1m"
labels = {
severity = "critical"
}
annotations = {
summary = "containerd service unhealthy on {{ $labels.node }}"
}
},
]
}
]
})
}
}
*/
# GRAFANA LOKI DATASOURCE DISABLED - Points to non-existent Loki service
# Can be re-enabled when Loki is restored
/*
resource "kubernetes_config_map" "grafana_loki_datasource" {
metadata {
name = "grafana-loki-datasource"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
grafana_datasource = "1"
}
}
data = {
"loki-datasource.yaml" = yamlencode({
apiVersion = 1
datasources = [{
name = "Loki"
type = "loki"
access = "proxy"
url = "http://loki.monitoring.svc.cluster.local:3100"
isDefault = false
}]
})
}
}
*/

View file

@ -1,109 +0,0 @@
loki:
commonConfig:
replication_factor: 1
schemaConfig:
configs:
- from: "2025-04-01"
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: loki_index_
period: 24h
ingester:
chunk_idle_period: 12h
max_chunk_age: 24h
chunk_retain_period: 1m
chunk_target_size: 1572864
wal:
dir: /loki-wal
pattern_ingester:
enabled: true
limits_config:
allow_structured_metadata: true
volume_enabled: true
retention_period: 720h
compactor:
retention_enabled: true
working_directory: /var/loki/compactor
compaction_interval: 1h
delete_request_store: filesystem
ruler:
enable_api: true
storage:
type: local
local:
directory: /var/loki/rules
alertmanager_url: http://prometheus-alertmanager.monitoring.svc.cluster.local:9093
ring:
kvstore:
store: inmemory
rule_path: /var/loki/scratch
storage:
type: "filesystem"
auth_enabled: false
minio:
enabled: false
deploymentMode: SingleBinary
singleBinary:
replicas: 1
persistence:
enabled: true
size: 50Gi
storageClass: "iscsi-truenas"
extraVolumes:
- name: wal
emptyDir:
medium: Memory
sizeLimit: 2Gi
- name: rules
configMap:
name: loki-alert-rules
extraVolumeMounts:
- name: wal
mountPath: /loki-wal
- name: rules
mountPath: /var/loki/rules/fake
resources:
requests:
cpu: 250m
memory: 2Gi
limits:
memory: 4Gi
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
# Disable optional components for single binary mode
gateway:
enabled: false
chunksCache:
enabled: false
resultsCache:
enabled: false

View file

@ -1,214 +0,0 @@
variable "tls_secret_name" {}
variable "alertmanager_account_password" {}
variable "idrac_host" {
default = "192.168.1.4"
}
variable "idrac_username" {
default = "root"
}
variable "idrac_password" {
default = "calvin"
sensitive = true
}
variable "alertmanager_slack_api_url" {}
variable "tiny_tuya_service_secret" {
type = string
sensitive = true
}
variable "haos_api_token" {
type = string
sensitive = true
}
variable "pve_password" {
type = string
sensitive = true
}
variable "grafana_admin_password" {
type = string
sensitive = true
}
variable "tier" { type = string }
variable "mysql_host" { type = string }
resource "kubernetes_namespace" "monitoring" {
metadata {
name = "monitoring"
labels = {
"istio-injection" : "disabled"
tier = var.tier
"resource-governance/custom-quota" = "true"
}
}
}
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.monitoring.metadata[0].name
tls_secret_name = var.tls_secret_name
}
# Terraform gets angry with the 30k values file :/ use ansible until solved (see the Ansible sketch below the commented-out release)
# resource "helm_release" "ups_prometheus_snmp_exporter" {
# namespace = kubernetes_namespace.monitoring.metadata[0].name
# create_namespace = true
# name = "ups_prometheus_exporter"
# repository = "https://prometheus-community.github.io/helm-charts"
# chart = "prometheus-snmp-exporter"
# values = [file("${path.module}/ups_snmp_values.yaml")]
# }
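As a rough sketch of that Ansible workaround (assuming the kubernetes.core collection is installed; the release name and values path are illustrative, not taken from the playbooks in this repo):

- name: Install prometheus-snmp-exporter chart outside Terraform
  kubernetes.core.helm:
    name: prometheus-snmp-exporter
    chart_repo_url: https://prometheus-community.github.io/helm-charts
    chart_ref: prometheus-snmp-exporter
    release_namespace: monitoring
    values_files:
      - ups_snmp_values.yaml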
resource "kubernetes_cron_job_v1" "monitor_prom" {
metadata {
name = "monitor-prometheus"
}
spec {
concurrency_policy = "Replace"
failed_jobs_history_limit = 5
schedule = "*/30 * * * *"
job_template {
metadata {
}
spec {
template {
metadata {
}
spec {
container {
name = "monitor-prometheus"
image = "alpine"
command = ["/bin/sh", "-c", "apk add --update curl && curl --connect-timeout 2 prometheus-server.monitoring.svc.cluster.local || curl https://webhook.viktorbarzin.me/fb/message-viktor -d 'Prometheus is down!'"]
}
}
}
}
}
}
}
resource "kubernetes_manifest" "status_redirect_middleware" {
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "status-redirect"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec = {
redirectRegex = {
regex = ".*"
replacement = "https://hetrixtools.com/r/38981b548b5d38b052aca8d01285a3f3/"
permanent = true
}
}
}
}
resource "kubernetes_ingress_v1" "status" {
metadata {
name = "hetrix-redirect-ingress"
namespace = kubernetes_namespace.monitoring.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "monitoring-status-redirect@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["status.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "status.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "not-used"
port {
number = 80 # redirected by middleware
}
}
}
}
}
}
}
}
resource "kubernetes_manifest" "yotovski_redirect_middleware" {
manifest = {
apiVersion = "traefik.io/v1alpha1"
kind = "Middleware"
metadata = {
name = "yotovski-redirect"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec = {
redirectRegex = {
regex = ".*"
replacement = "https://hetrixtools.com/r/2ba9d7a5e017794db0fd91f0115a8b3b/"
permanent = true
}
}
}
}
resource "kubernetes_ingress_v1" "status_yotovski" {
metadata {
name = "hetrix-yotovski-redirect-ingress"
namespace = kubernetes_namespace.monitoring.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "monitoring-yotovski-redirect@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["yotovski-status.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "yotovski-status.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "not-used" # redirected by middleware
port {
number = 80
}
}
}
}
}
}
}
}
# Custom ResourceQuota for monitoring larger than the default 1-cluster tier quota
# because monitoring runs 29+ pods (Prometheus, Grafana, Loki, Alloy, exporters, etc.)
resource "kubernetes_resource_quota" "monitoring" {
metadata {
name = "monitoring-quota"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec {
hard = {
"requests.cpu" = "16"
"requests.memory" = "16Gi"
"limits.memory" = "64Gi"
pods = "100"
}
}
}

View file

@ -1,31 +0,0 @@
resource "kubernetes_persistent_volume_claim" "prometheus_server_pvc" {
metadata {
name = "prometheus-data-proxmox"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
spec {
access_modes = ["ReadWriteOnce"]
storage_class_name = "proxmox-lvm"
resources {
requests = {
storage = "200Gi"
}
}
}
}
resource "helm_release" "prometheus" {
namespace = kubernetes_namespace.monitoring.metadata[0].name
create_namespace = true
name = "prometheus"
repository = "https://prometheus-community.github.io/helm-charts"
chart = "prometheus"
# version = "15.0.2"
version = "25.8.2"
values = [templatefile("${path.module}/prometheus_chart_values.tpl", { alertmanager_mail_pass = var.alertmanager_account_password, alertmanager_slack_api_url = var.alertmanager_slack_api_url, tuya_api_key = var.tiny_tuya_service_secret, haos_api_token = var.haos_api_token })]
}

File diff suppressed because it is too large

View file

@ -1,126 +0,0 @@
resource "kubernetes_secret" "pve_exporter_config" {
metadata {
name = "pve-exporter-config"
namespace = kubernetes_namespace.monitoring.metadata[0].name
}
data = {
"pve.yml" = <<-EOF
      default:
        user: "root@pam"
        password: ${var.pve_password}
        verify_ssl: false
        timeout: 30
EOF
}
}
resource "kubernetes_deployment" "pve_exporter" {
metadata {
name = "proxmox-exporter"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
tier = var.tier
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "proxmox-exporter"
}
}
template {
metadata {
labels = {
app = "proxmox-exporter"
}
annotations = {
"diun.enable" = "true"
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$"
}
}
spec {
container {
name = "proxmox-exporter"
image = "prompve/prometheus-pve-exporter:3.8.2"
port {
container_port = 9221
}
resources {
requests = {
cpu = "15m"
memory = "256Mi"
}
limits = {
memory = "256Mi"
}
}
# Mount the file into the container
volume_mount {
name = "config-volume"
mount_path = "/etc/prometheus"
read_only = true
}
}
volume {
name = "config-volume"
secret {
secret_name = kubernetes_secret.pve_exporter_config.metadata[0].name
items {
key = "pve.yml"
path = "pve.yml" # This results in /etc/prometheus/pve.yml
}
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "proxmox-exporter" {
metadata {
name = "proxmox-exporter"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
"app" = "proxmox-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/port" = 9221
"prometheus.io/path" = "/pve"
"prometheus.io/param_target" = "192.168.1.127"
"prometheus.io/param_node" = "1"
"prometheus.io/param_cluster" = "1"
}
}
spec {
selector = {
"app" = "proxmox-exporter"
}
port {
name = "http"
port = 9221
target_port = 9221
}
}
}
# To monitor the pve node, use the node exporter and the playbook in this repo. From the root run:
# ansible-playbook -i ./playbooks/inventory.ini ./playbooks/deploy_node_exporter.yaml
# This installs the exporter binary
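For completeness, the matching Prometheus scrape job is a one-liner; a minimal sketch, assuming node_exporter's default port 9100 on the Proxmox host (the job name is illustrative):

- job_name: pve-node-exporter
  static_configs:
    - targets: ["192.168.1.127:9100"]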

View file

@ -1,51 +0,0 @@
import asyncio
import logging
import os
import signal
import sys
import time
import aiohttp
iDRAC_HOST = 'idrac'
iDRAC_USER_ENV_VAR = 'idrac_user'
iDRAC_PASSWORD_ENV_VAR = 'idrac_password'
SHOULD_RUN = True
def signal_handler(sig, frame):
logging.warning(f'signal {sig} received. shutting down gracefully...')
global SHOULD_RUN
SHOULD_RUN = False
time.sleep(60)
sys.exit(0)
async def main() -> None:
# define signal handlers
signal.signal(signal.SIGINT, signal_handler)
user = os.environ.get(iDRAC_USER_ENV_VAR)
if user is None:
logging.critical('missing environment variable for idrac user'
f' please set {iDRAC_USER_ENV_VAR}')
return
password = os.environ.get(iDRAC_PASSWORD_ENV_VAR)
if password is None:
logging.critical('missing environment variable for idrac password'
f' please set {iDRAC_PASSWORD_ENV_VAR}')
return
logging.info('service initiated with credentials')
return await monitor(user, password)
async def monitor(user: str, password: str) -> None:
while SHOULD_RUN:
        await asyncio.sleep(1)  # avoid a busy loop in this (abandoned) monitoring stub
if __name__ == '__main__':
    # abandoned because the server cannot start itself when it's off :/
asyncio.run(main())

View file

@ -1,66 +0,0 @@
#!/bin/bash
tag=server-power-cycle-script
logger -t $tag start $(date '+%F-%R')
if [ -f /tmp/server-power-cycle-lock ]; then
logger -t $tag 'Script already running. exiting'
exit 0
fi
touch /tmp/server-power-cycle-lock
if [ -f /root/server-power-cycle/state.off ]; then
logger -t $tag 'Server state set to off'
while true; do
sleep 60 # sleep 1 minute
logger -t $tag 'Trying to connect to idrac system...'
curl --connect-timeout 5 -s -k -u root:calvin -H"Content-type: application/json" -X GET https://192.168.1.4/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2
if [[ $? -eq 0 ]]; then
logger -t $tag "Connected to idrac, assuming power is back on"
logger -t $tag "Power supply restored, sending power on command"
curl -s -k -u root:calvin -X POST -d '{"Action": "Reset", "ResetType": "On"}' -H"Content-type: application/json" https://192.168.1.4/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset
rm /root/server-power-cycle/state.off
logger -t $tag end $(date '+%F-%R')
rm /tmp/server-power-cycle-lock
exit 0
fi
done
fi
voltage=$(curl -s -k -u root:calvin -H"Content-type: application/json" -X GET https://192.168.1.4/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2 |jq .LineInputVoltage)
# check input voltage on the power supply connected to the outer system
if [[ $voltage -gt 0 ]]; then
logger -t $tag "power supply is on. exiting"
logger -t $tag end $(date '+%F-%R')
rm /tmp/server-power-cycle-lock
exit 0
fi
to_wait=30
echo "Continuously checking power supply for the next $to_wait minutes"
for i in $(seq $to_wait); do
logger -t $tag "Sleeping a minute..Minute $i"
sleep 60
  # check input voltage on the power supply connected to the outer system
voltage=$(curl -s -k -u root:calvin -H"Content-type: application/json" -X GET https://192.168.1.4/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2 |jq .LineInputVoltage)
if [[ $voltage -gt 0 ]]; then
logger -t $tag "power supply is on. exiting"
logger -t $tag end $(date '+%F-%R')
rm /tmp/server-power-cycle-lock
exit 0
fi
done
logger -t $tag "Power supply did not come back, sending graceful shutdown signal"
curl -s -k -u root:calvin -X POST -d '{"Action": "Reset", "ResetType": "GracefulShutdown"}' -H"Content-type: application/json" https://192.168.1.4/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset
touch /root/server-power-cycle/state.off
rm /tmp/server-power-cycle-lock
logger -t $tag end $(date '+%F-%R')

View file

@ -1,146 +0,0 @@
/**
1. clone snmp exporter
2. update generator.yaml to include only interesting modules
3. make generate
4. cp snmp.yml to wherever it is used
5. scrape service with curl 'http://snmp-exporter.monitoring.svc.cluster.local:9116/snmp?auth=public_v2&module=huawei&target=192.168.1.5%3A161'
generator reference - https://github.com/prometheus/snmp_exporter/tree/main/generator
https://sbcode.net/prometheus/snmp-generate-huawei/
*/
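To illustrate step 2 above, a trimmed generator.yml could look roughly like this (a sketch only: the auth and module names mirror the curl example, but the walked OID subtree is a placeholder, not taken from this repo):

auths:
  public_v2:
    community: public
    version: 2
modules:
  huawei:
    walk:
      - 1.3.6.1.2.1.2  # placeholder OID subtree; replace with the MIBs of interest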
resource "kubernetes_config_map" "snmp-exporter-yaml" {
metadata {
name = "snmp-exporter-yaml"
namespace = kubernetes_namespace.monitoring.metadata[0].name
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"snmp.yml" = file("${path.module}/ups_snmp_values.yaml")
}
}
resource "kubernetes_deployment" "snmp-exporter" {
metadata {
name = "snmp-exporter"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
app = "snmp-exporter"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "snmp-exporter"
}
}
template {
metadata {
labels = {
app = "snmp-exporter"
}
annotations = {
"diun.enable" = "true"
"diun.include_tags" = "^v\\d+\\.\\d+\\.\\d+$"
}
}
spec {
container {
image = "prom/snmp-exporter:v0.30.1"
name = "snmp-exporter"
# command = ["/usr/local/bin/redfish_exporter", "--config.file", "/app/config.yml"]
resources {
requests = {
cpu = "10m"
memory = "256Mi"
}
limits = {
memory = "256Mi"
}
}
port {
container_port = 9116
}
volume_mount {
name = "config-volume"
mount_path = "/etc/snmp_exporter/"
}
}
volume {
name = "config-volume"
config_map {
name = "snmp-exporter-yaml"
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
}
resource "kubernetes_service" "snmp-exporter" {
metadata {
name = "snmp-exporter"
namespace = kubernetes_namespace.monitoring.metadata[0].name
labels = {
"app" = "snmp-exporter"
}
# annotations = {
# "prometheus.io/scrape" = "true"
# "prometheus.io/path" = "/snmp?auth=Public0&target=tcp%3A%2F%2F192.%3A161"
# "prometheus.io/port" = "9116"
# }
}
spec {
selector = {
"app" = "snmp-exporter"
}
port {
name = "http"
port = "9116"
target_port = "9116"
}
}
}
module "snmp-exporter-ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.monitoring.metadata[0].name
name = "snmp-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
port = 9116
}
module "snmp-exporter-ingress-external" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.monitoring.metadata[0].name
name = "snmp-exporter-external"
root_domain = "viktorbarzin.me"
tls_secret_name = var.tls_secret_name
allow_local_access_only = false
ssl_redirect = false
port = 9116
protected = false
}

File diff suppressed because it is too large

View file

@ -1,93 +0,0 @@
variable "tier" { type = string }
variable "nfs_server" { type = string }
resource "kubernetes_namespace" "nfs_csi" {
metadata {
name = "nfs-csi"
labels = {
tier = var.tier
}
}
}
resource "helm_release" "nfs_csi_driver" {
namespace = kubernetes_namespace.nfs_csi.metadata[0].name
create_namespace = false
name = "csi-driver-nfs"
atomic = true
timeout = 300
repository = "https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts"
chart = "csi-driver-nfs"
values = [yamlencode({
controller = {
replicas = 2
livenessProbe = {
httpPort = 29653
}
resources = {
csiProvisioner = {
requests = { cpu = "10m", memory = "128Mi" }
limits = { memory = "128Mi" }
}
csiResizer = {
requests = { cpu = "10m", memory = "128Mi" }
limits = { memory = "128Mi" }
}
csiSnapshotter = {
requests = { cpu = "10m", memory = "128Mi" }
limits = { memory = "128Mi" }
}
nfs = {
requests = { cpu = "10m", memory = "128Mi" }
limits = { memory = "128Mi" }
}
livenessProbe = {
requests = { cpu = "10m", memory = "64Mi" }
limits = { memory = "64Mi" }
}
}
}
node = {
resources = {
nfs = {
requests = { cpu = "10m", memory = "128Mi" }
limits = { memory = "128Mi" }
}
livenessProbe = {
requests = { cpu = "10m", memory = "64Mi" }
limits = { memory = "64Mi" }
}
nodeDriverRegistrar = {
requests = { cpu = "10m", memory = "64Mi" }
limits = { memory = "64Mi" }
}
}
}
storageClass = {
create = false
}
})]
}
resource "kubernetes_storage_class" "nfs_truenas" {
metadata {
name = "nfs-truenas"
}
storage_provisioner = "nfs.csi.k8s.io"
reclaim_policy = "Retain"
volume_binding_mode = "Immediate"
mount_options = [
"soft",
"timeo=30",
"retrans=3",
"actimeo=5",
]
parameters = {
server = var.nfs_server
share = "/mnt/main"
}
}
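For reference, a hypothetical PVC consuming this class only needs to name it (the PVC name and size below are illustrative):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: nfs-truenas
  resources:
    requests:
      storage: 10Gi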

View file

@ -1,27 +0,0 @@
# GPU container
FROM ubuntu
ENV DEBIAN_FRONTEND=noninteractive
# Install Python and pip
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3 \
python3-pip \
python3-venv
# Deps
RUN apt-get install -y ffmpeg espeak-ng
# Set a working directory
WORKDIR /app
RUN python3 -m venv audiblez && ./audiblez/bin/pip install audiblez
# RUN python3 -m venv audiblez
CMD ["/usr/bin/sleep", "86400"]
# RUN pip install audiblez
# # Default command
# CMD ["/usr/bin/sleep", "86400"]

View file

@ -1,688 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
module "tls_secret" {
source = "../../../../modules/kubernetes/setup_tls_secret"
namespace = kubernetes_namespace.nvidia.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_namespace" "nvidia" {
metadata {
name = "nvidia"
labels = {
"istio-injection" : "disabled"
tier = var.tier
"resource-governance/custom-quota" = "true"
}
}
}
resource "kubernetes_resource_quota" "nvidia_quota" {
metadata {
name = "tier-quota"
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
spec {
hard = {
"limits.memory" = "48Gi"
"requests.cpu" = "8"
"requests.memory" = "12Gi"
pods = "40"
}
}
}
# Apply GPU taint and label to ensure only GPU workloads run on GPU node
resource "null_resource" "gpu_node_config" {
provisioner "local-exec" {
command = <<-EOT
kubectl taint nodes k8s-node1 nvidia.com/gpu=true:PreferNoSchedule --overwrite
kubectl label nodes k8s-node1 gpu=true --overwrite
EOT
}
# Re-run if namespace changes (proxy for cluster changes)
triggers = {
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
}
# [not needed anymore; part of the chart values] Apply to operator with:
# kubectl patch clusterpolicies.nvidia.com/cluster-policy -n gpu-operator --type merge -p '{"spec": {"devicePlugin": {"config": {"name": "time-slicing-config", "default": "any"}}}}'
resource "kubernetes_config_map" "time_slicing_config" {
metadata {
name = "time-slicing-config"
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
data = {
any = <<-EOF
      flags:
        migStrategy: none
      sharing:
        timeSlicing:
          renameByDefault: false
          failRequestsGreaterThanOne: false
          resources:
            - name: nvidia.com/gpu
              replicas: 100
EOF
}
depends_on = [kubernetes_namespace.nvidia]
}
resource "helm_release" "nvidia-gpu-operator" {
namespace = kubernetes_namespace.nvidia.metadata[0].name
name = "nvidia-gpu-operator"
repository = "https://helm.ngc.nvidia.com/nvidia"
chart = "gpu-operator"
atomic = true
# version = "0.9.3"
timeout = 6000
values = [templatefile("${path.module}/values.yaml", {})]
depends_on = [kubernetes_config_map.time_slicing_config]
}
resource "kubernetes_deployment" "nvidia-exporter" {
metadata {
name = "nvidia-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
labels = {
app = "nvidia-exporter"
tier = var.tier
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "nvidia-exporter"
}
}
template {
metadata {
labels = {
app = "nvidia-exporter"
}
}
spec {
node_selector = {
"gpu" : "true"
}
toleration {
key = "nvidia.com/gpu"
operator = "Equal"
value = "true"
effect = "NoSchedule"
}
container {
image = "nvidia/dcgm-exporter:latest"
name = "nvidia-exporter"
port {
container_port = 9400
}
security_context {
privileged = true
capabilities {
add = ["SYS_ADMIN"]
}
}
resources {
requests = {
memory = "192Mi"
}
limits = {
memory = "192Mi"
"nvidia.com/gpu" = "1"
}
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
depends_on = [helm_release.nvidia-gpu-operator]
}
resource "kubernetes_service" "nvidia-exporter" {
metadata {
name = "nvidia-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
labels = {
"app" = "nvidia-exporter"
}
}
spec {
selector = {
app = "nvidia-exporter"
}
port {
name = "http"
port = 80
target_port = 9400
}
}
}
module "ingress" {
source = "../../../../modules/kubernetes/ingress_factory"
namespace = kubernetes_namespace.nvidia.metadata[0].name
name = "nvidia-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
}
# resource "kubernetes_ingress_v1" "nvidia-exporter" {
# metadata {
# name = "nvidia-exporter"
# namespace = kubernetes_namespace.nvidia.metadata[0].name
# annotations = {
# "kubernetes.io/ingress.class" = "nginx"
# "nginx.ingress.kubernetes.io/whitelist-source-range" : "192.168.1.0/24, 10.0.0.0/8"
# "nginx.ingress.kubernetes.io/ssl-redirect" : "false" # used only in LAN
# }
# }
# spec {
# tls {
# hosts = ["nvidia-exporter.viktorbarzin.lan"]
# secret_name = var.tls_secret_name
# }
# rule {
# host = "nvidia-exporter.viktorbarzin.lan"
# http {
# path {
# backend {
# service {
# name = "nvidia-exporter"
# port {
# number = 80
# }
# }
# }
# }
# }
# }
# }
# }
# resource "kubernetes_deployment" "gpu-container" {
# metadata {
# name = "gpu-container"
# namespace = kubernetes_namespace.nvidia.metadata[0].name
# labels = {
# app = "gpu-container"
# }
# }
# spec {
# replicas = 1
# selector {
# match_labels = {
# app = "gpu-container"
# }
# }
# template {
# metadata {
# labels = {
# app = "gpu-container"
# }
# }
# spec {
# node_selector = {
# "gpu" : "true"
# }
# container {
# image = "ubuntu"
# name = "gpu-container"
# command = ["/usr/bin/sleep", "3600"]
# # security_context {
# # privileged = true
# # capabilities {
# # add = ["SYS_ADMIN"]
# # }
# # }
# resources {
# limits = {
# "nvidia.com/gpu" = "1"
# }
# }
# }
# }
# }
# }
# depends_on = [helm_release.nvidia-gpu-operator]
# }
# GPU Pod Memory Exporter - exposes per-pod GPU memory usage as Prometheus metrics
resource "kubernetes_config_map" "gpu_pod_exporter_script" {
metadata {
name = "gpu-pod-exporter-script"
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
data = {
"exporter.py" = <<-EOF
      #!/usr/bin/env python3
      """GPU Pod Memory Exporter - Collects per-pod GPU memory usage."""
      import subprocess
      import time
      import re
      import os
      import json
      import urllib.request
      import ssl
      from http.server import HTTPServer, BaseHTTPRequestHandler
      METRICS_PORT = 9401
      SCRAPE_INTERVAL = 15
      # Kubernetes API configuration
      K8S_API = "https://kubernetes.default.svc"
      TOKEN_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/token"
      CA_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
      # Cache for container ID to pod info mapping
      container_cache = {}
      cache_refresh_time = 0
      CACHE_TTL = 60  # Refresh cache every 60 seconds
      def get_k8s_token():
          """Read Kubernetes service account token."""
          try:
              with open(TOKEN_PATH, 'r') as f:
                  return f.read().strip()
          except:
              return None
      def refresh_container_cache():
          """Refresh the container ID to pod mapping from Kubernetes API."""
          global container_cache, cache_refresh_time
          token = get_k8s_token()
          if not token:
              return
          try:
              # Create SSL context with K8s CA
              ctx = ssl.create_default_context()
              if os.path.exists(CA_PATH):
                  ctx.load_verify_locations(CA_PATH)
              # Get all pods on this node
              node_name = os.environ.get('NODE_NAME', '')
              url = f"{K8S_API}/api/v1/pods?fieldSelector=spec.nodeName={node_name}"
              req = urllib.request.Request(url, headers={
                  'Authorization': f'Bearer {token}',
                  'Accept': 'application/json'
              })
              with urllib.request.urlopen(req, context=ctx, timeout=10) as resp:
                  data = json.loads(resp.read().decode())
              new_cache = {}
              for pod in data.get('items', []):
                  pod_name = pod['metadata']['name']
                  namespace = pod['metadata']['namespace']
                  # Get container statuses
                  for status in pod.get('status', {}).get('containerStatuses', []):
                      container_id = status.get('containerID', '')
                      # Extract the ID part (e.g., "containerd://abc123..." -> "abc123")
                      if '://' in container_id:
                          container_id = container_id.split('://')[-1]
                      if container_id:
                          short_id = container_id[:12]
                          new_cache[short_id] = {
                              'pod': pod_name,
                              'namespace': namespace,
                              'container': status.get('name', 'unknown')
                          }
              container_cache = new_cache
              cache_refresh_time = time.time()
              print(f"Refreshed container cache: {len(new_cache)} containers")
          except Exception as e:
              print(f"Error refreshing container cache: {e}")
      def get_pod_info(container_id):
          """Look up pod info for a container ID."""
          global cache_refresh_time
          # Refresh cache if stale
          if time.time() - cache_refresh_time > CACHE_TTL:
              refresh_container_cache()
          return container_cache.get(container_id, {
              'pod': 'unknown',
              'namespace': 'unknown',
              'container': 'unknown'
          })
      def get_gpu_processes():
          """Run nvidia-smi to get GPU process info."""
          try:
              result = subprocess.run(
                  ["nvidia-smi", "--query-compute-apps=pid,used_memory,process_name", "--format=csv,noheader,nounits"],
                  capture_output=True, text=True, timeout=10
              )
              if result.returncode != 0:
                  print(f"nvidia-smi error: {result.stderr}")
                  return []
              processes = []
              for line in result.stdout.strip().split('\n'):
                  if not line.strip():
                      continue
                  parts = [p.strip() for p in line.split(',')]
                  if len(parts) >= 3:
                      pid, memory_mib, process_name = parts[0], parts[1], parts[2]
                      processes.append({
                          'pid': pid,
                          'memory_bytes': int(memory_mib) * 1024 * 1024,
                          'process_name': process_name
                      })
              return processes
          except Exception as e:
              print(f"Error running nvidia-smi: {e}")
              return []
      def get_container_id(pid):
          """Map PID to container ID via cgroup."""
          cgroup_path = f"/host_proc/{pid}/cgroup"
          try:
              with open(cgroup_path, 'r') as f:
                  for line in f:
                      # Match container ID patterns (docker, containerd, cri-o)
                      match = re.search(r'[:/]([a-f0-9]{64})', line)
                      if match:
                          return match.group(1)[:12]
                      match = re.search(r'cri-containerd-([a-f0-9]{64})', line)
                      if match:
                          return match.group(1)[:12]
          except (FileNotFoundError, PermissionError):
              pass
          return "host"
      # Global metrics storage
      current_metrics = []
      def collect_metrics():
          """Collect GPU memory metrics."""
          global current_metrics
          metrics = []
          processes = get_gpu_processes()
          for proc in processes:
              container_id = get_container_id(proc['pid'])
              pod_info = get_pod_info(container_id)
              metrics.append({
                  'container_id': container_id,
                  'pid': proc['pid'],
                  'process_name': proc['process_name'],
                  'memory_bytes': proc['memory_bytes'],
                  'pod': pod_info['pod'],
                  'namespace': pod_info['namespace'],
                  'container': pod_info['container']
              })
          current_metrics = metrics
      def format_metrics():
          """Format metrics in Prometheus exposition format."""
          lines = [
              "# HELP gpu_pod_memory_used_bytes GPU memory used by pod",
              "# TYPE gpu_pod_memory_used_bytes gauge"
          ]
          for m in current_metrics:
              labels = ','.join([
                  f'namespace="{m["namespace"]}"',
                  f'pod="{m["pod"]}"',
                  f'container="{m["container"]}"',
                  f'process_name="{m["process_name"]}"',
                  f'pid="{m["pid"]}"'
              ])
              lines.append(f'gpu_pod_memory_used_bytes{{{labels}}} {m["memory_bytes"]}')
          return '\n'.join(lines) + '\n'
      class MetricsHandler(BaseHTTPRequestHandler):
          def do_GET(self):
              if self.path == '/metrics':
                  content = format_metrics()
                  self.send_response(200)
                  self.send_header('Content-Type', 'text/plain; charset=utf-8')
                  self.end_headers()
                  self.wfile.write(content.encode())
              elif self.path == '/health':
                  self.send_response(200)
                  self.end_headers()
                  self.wfile.write(b'ok')
              else:
                  self.send_response(404)
                  self.end_headers()
          def log_message(self, format, *args):
              pass  # Suppress request logging
      def background_collector():
          """Background thread to collect metrics periodically."""
          import threading
          def run():
              while True:
                  collect_metrics()
                  time.sleep(SCRAPE_INTERVAL)
          thread = threading.Thread(target=run, daemon=True)
          thread.start()
      if __name__ == '__main__':
          print(f"Starting GPU Pod Memory Exporter on port {METRICS_PORT}")
          refresh_container_cache()  # Initial cache load
          collect_metrics()  # Initial collection
          background_collector()
          server = HTTPServer(('', METRICS_PORT), MetricsHandler)
          server.serve_forever()
EOF
}
}
resource "kubernetes_service_account" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
}
resource "kubernetes_cluster_role" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
}
rule {
api_groups = [""]
resources = ["pods"]
verbs = ["list"]
}
}
resource "kubernetes_cluster_role_binding" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = kubernetes_cluster_role.gpu_pod_exporter.metadata[0].name
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.gpu_pod_exporter.metadata[0].name
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
}
resource "kubernetes_daemonset" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
labels = {
app = "gpu-pod-exporter"
tier = var.tier
}
}
spec {
selector {
match_labels = {
app = "gpu-pod-exporter"
}
}
template {
metadata {
labels = {
app = "gpu-pod-exporter"
}
}
spec {
host_pid = true
service_account_name = kubernetes_service_account.gpu_pod_exporter.metadata[0].name
node_selector = {
"gpu" : "true"
}
toleration {
key = "nvidia.com/gpu"
operator = "Equal"
value = "true"
effect = "NoSchedule"
}
container {
name = "exporter"
image = "python:3.11-slim"
command = ["/bin/bash", "-c"]
args = [
"python3 /scripts/exporter.py"
]
env {
name = "NODE_NAME"
value_from {
field_ref {
field_path = "spec.nodeName"
}
}
}
port {
container_port = 9401
name = "metrics"
}
volume_mount {
name = "scripts"
mount_path = "/scripts"
read_only = true
}
volume_mount {
name = "host-proc"
mount_path = "/host_proc"
read_only = true
}
resources {
requests = {
cpu = "10m"
memory = "128Mi"
}
limits = {
memory = "128Mi"
"nvidia.com/gpu" = "1"
}
}
liveness_probe {
http_get {
path = "/health"
port = 9401
}
initial_delay_seconds = 30
period_seconds = 30
timeout_seconds = 5
}
}
volume {
name = "scripts"
config_map {
name = kubernetes_config_map.gpu_pod_exporter_script.metadata[0].name
default_mode = "0755"
}
}
volume {
name = "host-proc"
host_path {
path = "/proc"
type = "Directory"
}
}
dns_config {
option {
name = "ndots"
value = "2"
}
}
}
}
}
depends_on = [helm_release.nvidia-gpu-operator]
}
resource "kubernetes_service" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
labels = {
app = "gpu-pod-exporter"
}
}
spec {
selector = {
app = "gpu-pod-exporter"
}
port {
name = "metrics"
port = 80
target_port = 9401
}
}
}
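Unlike the proxmox-exporter Service above, this Service carries no Prometheus scrape annotations. If annotation-based discovery were intended, a hypothetical addition (shown in plain Kubernetes YAML, assuming the port 80 -> 9401 mapping defined here) would be:

metadata:
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "80"
    prometheus.io/path: "/metrics"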

Some files were not shown because too many files have changed in this diff