TrueNAS deprecation: migrate all non-Immich storage to Proxmox NFS
- Migrate 7 backup CronJobs to Proxmox host NFS (192.168.1.127): etcd, mysql, postgresql, nextcloud, redis, vaultwarden, plotting-book
- Migrate headscale backup, ebook2audiobook, osm_routing to Proxmox NFS
- Migrate servarr (lidarr, readarr, soulseek) NFS refs to Proxmox
- Remove 79 orphaned TrueNAS NFS module declarations from 49 stacks
- Delete stacks/platform/modules/ (27 dead module copies, 65MB)
- Update nfs-truenas StorageClass to point to Proxmox (192.168.1.127)
- Remove iscsi DNS record from config.tfvars
- Fix woodpecker persistence config and alertmanager PV

Only Immich (8 PVCs, ~1.4TB) remains on TrueNAS.
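Most of the hunks below repeat a single pattern: each TrueNAS-backed `nfs_volume` module is either deleted outright (where the workload had already moved to a `*_proxmox` PVC) or renamed with a `_host` suffix and repointed at the Proxmox host's NFS export. A minimal sketch of the repointing step, using a hypothetical `example` service; the module interface, the 192.168.1.127 address, and the /srv/nfs paths are taken from the hunks below, while the rename-forces-replacement rationale is an assumption (a PV's NFS server and path cannot be changed in place, so a new name yields a fresh PV/PVC pair):

# Before: volume served by TrueNAS ("example" is a hypothetical placeholder)
module "nfs_data" {
  source     = "../../modules/kubernetes/nfs_volume"
  name       = "example-data"
  namespace  = kubernetes_namespace.example.metadata[0].name
  nfs_server = var.nfs_server          # TrueNAS
  nfs_path   = "/mnt/main/example"     # ZFS dataset export
}

# After: renamed with a "_host" suffix and pointed at the Proxmox host
module "nfs_data_host" {
  source     = "../../modules/kubernetes/nfs_volume"
  name       = "example-data-host"
  namespace  = kubernetes_namespace.example.metadata[0].name
  nfs_server = "192.168.1.127"         # Proxmox host NFS
  nfs_path   = "/srv/nfs/example"
}

# Every volume's persistent_volume_claim reference then follows the rename:
#   claim_name = module.nfs_data_host.claim_name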
parent 3246c4d112
commit 82b0f6c4cb
193 changed files with 825 additions and 177172 deletions
BIN  config.tfvars
Binary file not shown.
@@ -145,14 +145,6 @@ locals {
   ]
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "affine-data"
-  namespace  = kubernetes_namespace.affine.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/affine"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -58,14 +58,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "changedetection-data"
-  namespace  = kubernetes_namespace.changedetection.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/changedetection"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -113,13 +113,41 @@ resource "cloudflare_record" "non_proxied_dns_record_ipv6" {
   zone_id = var.cloudflare_zone_id
 }
 
-resource "cloudflare_record" "mail" {
-  content  = "mail.viktorbarzin.me"
+resource "cloudflare_record" "forwardemail_mx1" {
+  content  = "mx1.forwardemail.net"
   name     = "viktorbarzin.me"
   proxied  = false
   ttl      = 1
   type     = "MX"
-  priority = 1
+  priority = 10
   zone_id  = var.cloudflare_zone_id
 }
+
+resource "cloudflare_record" "forwardemail_mx2" {
+  content  = "mx2.forwardemail.net"
+  name     = "viktorbarzin.me"
+  proxied  = false
+  ttl      = 1
+  type     = "MX"
+  priority = 10
+  zone_id  = var.cloudflare_zone_id
+}
+
+resource "cloudflare_record" "forwardemail_config" {
+  content = "\"forward-email=mail.viktorbarzin.me\""
+  name    = "viktorbarzin.me"
+  proxied = false
+  ttl     = 1
+  type    = "TXT"
+  zone_id = var.cloudflare_zone_id
+}
+
+resource "cloudflare_record" "forwardemail_port" {
+  content = "\"forward-email-port=266\""
+  name    = "viktorbarzin.me"
+  proxied = false
+  ttl     = 1
+  type    = "TXT"
+  zone_id = var.cloudflare_zone_id
+}

@@ -4,6 +4,7 @@
 
 variable "tls_secret_name" { type = string }
 variable "mysql_host" { type = string }
+variable "postgresql_host" { type = string }
 
 data "vault_kv_secret_v2" "secrets" {
   mount = "secret"

@@ -19,6 +20,7 @@ module "crowdsec" {
   tier            = local.tiers.cluster
   tls_secret_name = var.tls_secret_name
   mysql_host      = var.mysql_host
+  postgresql_host = var.postgresql_host
   homepage_username = local.homepage_credentials["crowdsec"]["username"]
   homepage_password = local.homepage_credentials["crowdsec"]["password"]
   enroll_key        = data.vault_kv_secret_v2.secrets.data["crowdsec_enroll_key"]

@@ -15,6 +15,7 @@ variable "crowdsec_dash_machine_password" {
 variable "tier" { type = string }
 variable "slack_webhook_url" { type = string }
 variable "mysql_host" { type = string }
+variable "postgresql_host" { type = string }
 
 module "tls_secret" {
   source = "../../../../modules/kubernetes/setup_tls_secret"

@@ -127,7 +128,7 @@ resource "helm_release" "crowdsec" {
   repository = "https://crowdsecurity.github.io/helm-charts"
   chart      = "crowdsec"
 
-  values = [templatefile("${path.module}/values.yaml", { homepage_username = var.homepage_username, homepage_password = var.homepage_password, DB_PASSWORD = var.db_password, ENROLL_KEY = var.enroll_key, SLACK_WEBHOOK_URL = var.slack_webhook_url, mysql_host = var.mysql_host })]
+  values = [templatefile("${path.module}/values.yaml", { homepage_username = var.homepage_username, homepage_password = var.homepage_password, DB_PASSWORD = var.db_password, ENROLL_KEY = var.enroll_key, SLACK_WEBHOOK_URL = var.slack_webhook_url, mysql_host = var.mysql_host, postgresql_host = var.postgresql_host })]
   timeout       = 1200
   wait          = true
   wait_for_jobs = true

@@ -338,7 +339,7 @@ resource "kubernetes_cron_job_v1" "crowdsec_blocklist_import" {
 
               # Run with native mode since we are inside the CrowdSec container
               export MODE=native
-              export DECISION_DURATION=24h
+              export DECISION_DURATION=168h
               export FETCH_TIMEOUT=60
               export LOG_LEVEL=INFO
 

@@ -116,15 +116,17 @@ lapi:
   enabled: true
   env:
     - name: MB_DB_TYPE
-      value: "mysql"
+      value: "postgres"
     - name: MB_DB_DBNAME
-      value: crowdsec-metabase
+      value: crowdsec_metabase
     - name: MB_DB_USER
      value: "crowdsec"
     - name: MB_DB_PASS
      value: "${DB_PASSWORD}"
     - name: MB_DB_HOST
-      value: "${mysql_host}"
+      value: "${postgresql_host}"
+    - name: MB_DB_PORT
+      value: "5432"
 
     - name: MB_EMAIL_SMTP_USERNAME
      value: "info@viktorbarzin.me"

@@ -206,12 +208,20 @@ config:
 
   config.yaml.local: |
     db_config:
-      type: mysql
+      type: postgres
       user: crowdsec
       password: ${DB_PASSWORD}
       db_name: crowdsec
-      host: ${mysql_host}
-      port: 3306
+      host: ${postgresql_host}
+      port: 5432
+      flush:
+        max_items: 10000
+        max_age: "7d"
+        bouncers_autodelete:
+          api_key: "30d"
+        agents_autodelete:
+          login_password: "30d"
+      decision_bulk_size: 2000
     api:
       server:
         auto_registration: # Activate if not using TLS for authentication

@@ -151,6 +151,24 @@ resource "kubernetes_cluster_role_binding" "mysql_sidecar_extra" {
   }
 }
 
+# ConfigMap for MySQL extra config — mounted as subPath over 99-extra.cnf
+# This is the only reliable way to persist innodb_doublewrite=OFF because:
+# - spec.mycnf only applies on initial cluster creation
+# - The operator's initconf container overwrites 99-extra.cnf on every pod start
+# - SET PERSIST doesn't support innodb_doublewrite (static variable)
+resource "kubernetes_config_map" "mysql_extra_cnf" {
+  metadata {
+    name      = "mysql-extra-cnf"
+    namespace = kubernetes_namespace.dbaas.metadata[0].name
+  }
+  data = {
+    "99-extra.cnf" = <<-EOT
+      [mysqld]
+      innodb_doublewrite=OFF
+    EOT
+  }
+}
+
 resource "helm_release" "mysql_cluster" {
   namespace        = kubernetes_namespace.dbaas.metadata[0].name
   create_namespace = false

@@ -195,7 +213,7 @@ resource "helm_release" "mysql_cluster" {
     }
 
     serverConfig = {
-      "my.cnf" = <<-EOT
+      mycnf = <<-EOT
         [mysqld]
         skip-name-resolve
         # Auto-recovery after crashes: rejoin group without manual intervention

@@ -371,20 +389,12 @@ resource "kubernetes_service" "mysql" {
   depends_on = [helm_release.mysql_cluster]
 }
 
-module "nfs_mysql_backup" {
+module "nfs_mysql_backup_host" {
   source    = "../../../../modules/kubernetes/nfs_volume"
-  name      = "dbaas-mysql-backup"
+  name      = "dbaas-mysql-backup-host"
   namespace = kubernetes_namespace.dbaas.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/mysql-backup"
-}
-
-module "nfs_pgadmin" {
-  source    = "../../../../modules/kubernetes/nfs_volume"
-  name      = "dbaas-pgadmin"
-  namespace = kubernetes_namespace.dbaas.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/postgresql/pgadmin"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/mysql-backup"
 }
 
 resource "kubernetes_persistent_volume_claim" "pgadmin_proxmox" {

@@ -409,12 +419,12 @@ resource "kubernetes_persistent_volume_claim" "pgadmin_proxmox" {
   }
 }
 
-module "nfs_postgresql_backup" {
+module "nfs_postgresql_backup_host" {
   source    = "../../../../modules/kubernetes/nfs_volume"
-  name      = "dbaas-postgresql-backup"
+  name      = "dbaas-postgresql-backup-host"
   namespace = kubernetes_namespace.dbaas.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/postgresql-backup"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/postgresql-backup"
 }
 
 resource "kubernetes_cron_job_v1" "mysql-backup" {

@@ -495,7 +505,7 @@ resource "kubernetes_cron_job_v1" "mysql-backup" {
          volume {
            name = "mysql-backup"
            persistent_volume_claim {
-             claim_name = module.nfs_mysql_backup.claim_name
+             claim_name = module.nfs_mysql_backup_host.claim_name
            }
          }
        }

@@ -988,8 +998,8 @@ resource "null_resource" "pg_cluster" {
     image         = "ghcr.io/cloudnative-pg/postgis:16"
     storage_size  = "20Gi"
     storage_class = "proxmox-lvm"
-    memory_limit  = "512Mi"
-
+    memory_limit  = "2Gi"
+    pg_params     = "v2-shared512-walcomp-workmem16"
   }
 
   provisioner "local-exec" {

@@ -1006,6 +1016,12 @@ resource "null_resource" "pg_cluster" {
       postgresql:
         parameters:
           search_path: '"$user", public'
+          shared_buffers: "512MB"
+          effective_cache_size: "1536MB"
+          work_mem: "16MB"
+          wal_compression: "on"
+          random_page_cost: "4"
+          checkpoint_completion_target: "0.9"
       enableAlterSystem: true
       enableSuperuserAccess: true
       inheritedMetadata:

@@ -1019,9 +1035,9 @@ resource "null_resource" "pg_cluster" {
       resources:
         requests:
           cpu: "50m"
-          memory: "512Mi"
+          memory: "2Gi"
         limits:
-          memory: "512Mi"
+          memory: "2Gi"
-    EOF
+    EOT
 }

@@ -1257,7 +1273,7 @@ resource "kubernetes_cron_job_v1" "postgresql-backup" {
          volume {
            name = "postgresql-backup"
            persistent_volume_claim {
-             claim_name = module.nfs_postgresql_backup.claim_name
+             claim_name = module.nfs_postgresql_backup_host.claim_name
            }
          }
        }

@@ -119,14 +119,6 @@ resource "kubernetes_cluster_role_binding" "diun" {
   }
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "diun-data"
-  namespace  = kubernetes_namespace.diun.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/diun"
-}
-
 resource "kubernetes_persistent_volume_claim" "repo" {
   wait_until_bound = false
   metadata {

@@ -22,20 +22,20 @@ resource "kubernetes_namespace" "ebook2audiobook" {
 }
 
 
-module "nfs_data" {
+module "nfs_data_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebook2audiobook-data"
+  name      = "ebook2audiobook-data-host"
   namespace = kubernetes_namespace.ebook2audiobook.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/ebook2audiobook"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/ebook2audiobook"
 }
 
-module "nfs_audiblez_data" {
+module "nfs_audiblez_data_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebook2audiobook-audiblez-data"
+  name      = "ebook2audiobook-audiblez-data-host"
   namespace = kubernetes_namespace.ebook2audiobook.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/audiblez"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/audiblez"
 }
 
 resource "kubernetes_deployment" "ebook2audiobook" {

@@ -109,7 +109,7 @@ resource "kubernetes_deployment" "ebook2audiobook" {
        volume {
          name = "data"
          persistent_volume_claim {
-           claim_name = module.nfs_data.claim_name
+           claim_name = module.nfs_data_host.claim_name
          }
        }
      }

@@ -306,7 +306,7 @@ resource "kubernetes_deployment" "audiblez" {
        volume {
          name = "data"
          persistent_volume_claim {
-           claim_name = module.nfs_audiblez_data.claim_name
+           claim_name = module.nfs_audiblez_data_host.claim_name
          }
        }
      }

@@ -392,7 +392,7 @@ resource "kubernetes_deployment" "audiblez-web" {
        volume {
          name = "data"
          persistent_volume_claim {
-           claim_name = module.nfs_audiblez_data.claim_name
+           claim_name = module.nfs_audiblez_data_host.claim_name
          }
        }
      }

@@ -132,12 +132,12 @@ module "tls_secret" {
 }
 
 # NFS Volumes - Calibre (prefixed with ebooks- to avoid PV name clash with old stacks)
-module "nfs_calibre_library" {
+module "nfs_calibre_library_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-calibre-library"
+  name      = "ebooks-calibre-library-host"
   namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/calibre-web-automated/calibre-library"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/calibre-web-automated/calibre-library"
 }
 
 # iSCSI volume for config (SQLite DBs) - enables WAL mode for concurrent reads/writes

@@ -162,45 +162,37 @@ resource "kubernetes_persistent_volume_claim" "calibre_config_iscsi" {
   }
 }
 
-module "nfs_calibre_ingest" {
+module "nfs_calibre_ingest_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-calibre-ingest"
+  name      = "ebooks-calibre-ingest-host"
   namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/calibre-web-automated/cwa-book-ingest"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/calibre-web-automated/cwa-book-ingest"
 }
 
-module "nfs_calibre_stacks_config" {
+module "nfs_calibre_stacks_config_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-calibre-stacks-config"
+  name      = "ebooks-calibre-stacks-config-host"
   namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/calibre-web-automated/stacks"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/calibre-web-automated/stacks"
 }
 
 # NFS Volumes - Audiobookshelf (prefixed with ebooks- to avoid PV name clash)
-module "nfs_audiobookshelf_audiobooks" {
+module "nfs_audiobookshelf_audiobooks_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-abs-audiobooks"
+  name      = "ebooks-abs-audiobooks-host"
   namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/audiobookshelf/audiobooks"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/audiobookshelf/audiobooks"
 }
 
-module "nfs_audiobookshelf_podcasts" {
+module "nfs_audiobookshelf_podcasts_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-abs-podcasts"
+  name      = "ebooks-abs-podcasts-host"
   namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/audiobookshelf/podcasts"
-}
-
-module "nfs_audiobookshelf_config" {
-  source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-abs-config"
-  namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/audiobookshelf/config"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/audiobookshelf/podcasts"
 }
 
 resource "kubernetes_persistent_volume_claim" "abs_config_proxmox" {

@@ -225,12 +217,12 @@ resource "kubernetes_persistent_volume_claim" "abs_config_proxmox" {
   }
 }
 
-module "nfs_audiobookshelf_metadata" {
+module "nfs_audiobookshelf_metadata_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ebooks-abs-metadata"
+  name      = "ebooks-abs-metadata-host"
   namespace = kubernetes_namespace.ebooks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/audiobookshelf/metadata"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/audiobookshelf/metadata"
 }
 
 # Calibre-Web-Automated Deployment

@@ -335,7 +327,7 @@ resource "kubernetes_deployment" "calibre-web-automated" {
        volume {
          name = "library"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_library.claim_name
+           claim_name = module.nfs_calibre_library_host.claim_name
          }
        }
        volume {

@@ -347,7 +339,7 @@ resource "kubernetes_deployment" "calibre-web-automated" {
        volume {
          name = "ingest"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_ingest.claim_name
+           claim_name = module.nfs_calibre_ingest_host.claim_name
          }
        }
      }

@@ -462,13 +454,13 @@ resource "kubernetes_deployment" "annas-archive-stacks" {
        volume {
          name = "config"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_stacks_config.claim_name
+           claim_name = module.nfs_calibre_stacks_config_host.claim_name
          }
        }
        volume {
          name = "ingest"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_ingest.claim_name
+           claim_name = module.nfs_calibre_ingest_host.claim_name
          }
        }
      }

@@ -599,13 +591,13 @@ resource "kubernetes_deployment" "audiobookshelf" {
        volume {
          name = "audiobooks"
          persistent_volume_claim {
-           claim_name = module.nfs_audiobookshelf_audiobooks.claim_name
+           claim_name = module.nfs_audiobookshelf_audiobooks_host.claim_name
          }
        }
        volume {
          name = "podcasts"
          persistent_volume_claim {
-           claim_name = module.nfs_audiobookshelf_podcasts.claim_name
+           claim_name = module.nfs_audiobookshelf_podcasts_host.claim_name
          }
        }
        volume {

@@ -617,7 +609,7 @@ resource "kubernetes_deployment" "audiobookshelf" {
        volume {
          name = "metadata"
          persistent_volume_claim {
-           claim_name = module.nfs_audiobookshelf_metadata.claim_name
+           claim_name = module.nfs_audiobookshelf_metadata_host.claim_name
          }
        }
      }

@@ -860,25 +852,25 @@ resource "kubernetes_deployment" "book_search" {
        volume {
          name = "cwa-ingest"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_ingest.claim_name
+           claim_name = module.nfs_calibre_ingest_host.claim_name
          }
        }
        volume {
          name = "audiobooks"
          persistent_volume_claim {
-           claim_name = module.nfs_audiobookshelf_audiobooks.claim_name
+           claim_name = module.nfs_audiobookshelf_audiobooks_host.claim_name
          }
        }
        volume {
          name = "calibre-library"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_library.claim_name
+           claim_name = module.nfs_calibre_library_host.claim_name
          }
        }
        volume {
          name = "stacks-config"
          persistent_volume_claim {
-           claim_name = module.nfs_calibre_stacks_config.claim_name
+           claim_name = module.nfs_calibre_stacks_config_host.claim_name
          }
        }
      }

@@ -22,14 +22,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "excalidraw-data"
-  namespace  = kubernetes_namespace.excalidraw.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/excalidraw"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -43,14 +43,6 @@ resource "kubernetes_manifest" "external_secret" {
   depends_on = [kubernetes_namespace.f1-stream]
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "f1-stream-data"
-  namespace  = kubernetes_namespace.f1-stream.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/f1-stream"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -20,14 +20,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "forgejo-data"
-  namespace  = kubernetes_namespace.forgejo.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/forgejo"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -57,14 +57,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "freshrss-data"
-  namespace  = kubernetes_namespace.immich.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/freshrss/data"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -87,14 +79,6 @@ resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   }
 }
 
-module "nfs_extensions" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "freshrss-extensions"
-  namespace  = kubernetes_namespace.immich.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/freshrss/extensions"
-}
-
 resource "kubernetes_persistent_volume_claim" "extensions_proxmox" {
   wait_until_bound = false
   metadata {

@@ -23,14 +23,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_config" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "frigate-config"
-  namespace  = kubernetes_namespace.frigate.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/frigate/config"
-}
-
 resource "kubernetes_persistent_volume_claim" "config_proxmox" {
   wait_until_bound = false
   metadata {

@@ -53,12 +45,12 @@ resource "kubernetes_persistent_volume_claim" "config_proxmox" {
   }
 }
 
-module "nfs_media" {
+module "nfs_media_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "frigate-media"
+  name      = "frigate-media-host"
   namespace = kubernetes_namespace.frigate.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/frigate/media"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/frigate/media"
 }
 
 resource "kubernetes_deployment" "frigate" {

@@ -207,7 +199,7 @@ for name, det in stats.get('detectors', {}).items():
        volume {
          name = "media"
          persistent_volume_claim {
-           claim_name = module.nfs_media.claim_name
+           claim_name = module.nfs_media_host.claim_name
          }
        }
        volume {

@@ -62,14 +62,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "grampsweb-data"
-  namespace  = kubernetes_namespace.grampsweb.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/grampsweb"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -20,14 +20,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "hackmd-data"
-  namespace  = kubernetes_namespace.hackmd.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/hackmd"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -36,12 +36,12 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
+module "nfs_data_host" {
   source    = "../../../../modules/kubernetes/nfs_volume"
-  name      = "headscale-data"
+  name      = "headscale-data-host"
   namespace = kubernetes_namespace.headscale.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/headscale"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/headscale"
 }
 
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {

@@ -472,7 +472,7 @@ resource "kubernetes_cron_job_v1" "headscale_backup" {
          volume {
            name = "backup"
            persistent_volume_claim {
-             claim_name = module.nfs_data.claim_name
+             claim_name = module.nfs_data_host.claim_name
            }
          }
          restart_policy = "OnFailure"

@@ -20,14 +20,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_uploads" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "health-uploads"
-  namespace  = kubernetes_namespace.health.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/health"
-}
-
 resource "kubernetes_persistent_volume_claim" "uploads_proxmox" {
   wait_until_bound = false
   metadata {

@@ -66,12 +66,12 @@ variable "nfs_server" { type = string }
 # }
 # }
 
-module "nfs_etcd_backup" {
+module "nfs_etcd_backup_host" {
   source    = "../../../../modules/kubernetes/nfs_volume"
-  name      = "infra-etcd-backup"
+  name      = "infra-etcd-backup-host"
   namespace = "default"
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/etcd-backup"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/etcd-backup"
 }
 
 # # backup etcd

@@ -172,7 +172,7 @@ resource "kubernetes_cron_job_v1" "backup-etcd" {
          volume {
            name = "backup"
            persistent_volume_claim {
-             claim_name = module.nfs_etcd_backup.claim_name
+             claim_name = module.nfs_etcd_backup_host.claim_name
            }
          }
          volume {

@@ -41,14 +41,6 @@ resource "kubernetes_manifest" "external_secret" {
   depends_on = [kubernetes_namespace.insta2spotify]
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "insta2spotify-data"
-  namespace  = kubernetes_namespace.insta2spotify.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/insta2spotify"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -12,14 +12,6 @@ resource "kubernetes_namespace" "isponsorblocktv" {
 # Before running, setup config using
 # docker run --rm -it -v ./youtube:/app/data -e TERM=$TERM -e COLORTERM=$COLORTERM ghcr.io/dmunozv04/isponsorblocktv --setup
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "isponsorblocktv-data"
-  namespace  = kubernetes_namespace.isponsorblocktv.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/isponsorblocktv/vermont"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -167,14 +167,6 @@ resource "kubernetes_secret" "opendkim_key" {
 }
 
 
-module "nfs_data" {
-  source     = "../../../../modules/kubernetes/nfs_volume"
-  name       = "mailserver-data"
-  namespace  = kubernetes_namespace.mailserver.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/mailserver"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -534,6 +526,13 @@ resource "kubernetes_service" "mailserver" {
      port        = 993
      target_port = "imap-secure"
    }
+
+   port {
+     name        = "dovecot-metrics"
+     protocol    = "TCP"
+     port        = 9166
+     target_port = 9166
+   }
  }
 }
 

@@ -550,7 +549,7 @@ resource "kubernetes_cron_job_v1" "email_roundtrip_monitor" {
   concurrency_policy            = "Replace"
   failed_jobs_history_limit     = 3
   successful_jobs_history_limit = 3
-  schedule                      = "*/30 * * * *"
+  schedule                      = "*/10 * * * *"
   job_template {
     metadata {}
     spec {

@@ -4,22 +4,6 @@ variable "roundcube_db_password" {
 }
 variable "mysql_host" { type = string }
 
-module "nfs_roundcube_html" {
-  source     = "../../../../modules/kubernetes/nfs_volume"
-  name       = "roundcubemail-html"
-  namespace  = kubernetes_namespace.mailserver.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/roundcubemail/html"
-}
-
-module "nfs_roundcube_enigma" {
-  source     = "../../../../modules/kubernetes/nfs_volume"
-  name       = "roundcubemail-enigma"
-  namespace  = kubernetes_namespace.mailserver.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/roundcubemail/enigma"
-}
-
 resource "kubernetes_config_map" "roundcubemail_config" {
   metadata {
     name = "roundcubemail.config"

@@ -56,14 +56,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "matrix-data"
-  namespace  = kubernetes_namespace.matrix.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/matrix"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -65,12 +65,12 @@ resource "kubernetes_persistent_volume_claim" "files_proxmox" {
   }
 }
 
-module "nfs_backups" {
+module "nfs_backups_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "meshcentral-backups"
+  name      = "meshcentral-backups-host"
   namespace = kubernetes_namespace.meshcentral.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/meshcentral/meshcentral-backups"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/meshcentral/meshcentral-backups"
 }
 
 resource "kubernetes_deployment" "meshcentral" {

@@ -201,7 +201,7 @@ EOT
        volume {
          name = "backups"
          persistent_volume_claim {
-           claim_name = module.nfs_backups.claim_name
+           claim_name = module.nfs_backups_host.claim_name
          }
        }
      }

@@ -38,8 +38,8 @@ resource "kubernetes_persistent_volume" "alertmanager_pv" {
      driver        = "nfs.csi.k8s.io"
      volume_handle = "alertmanager-pv"
      volume_attributes = {
-       server = var.nfs_server
-       share  = "/mnt/main/alertmanager"
+       server = "192.168.1.127"
+       share  = "/srv/nfs/alertmanager"
      }
    }
  }

@@ -49,7 +49,8 @@ resource "kubernetes_persistent_volume" "alertmanager_pv" {
      "retrans=3",
      "actimeo=5",
    ]
-   storage_class_name = "nfs-truenas"
+   storage_class_name               = "nfs-truenas"
+   persistent_volume_reclaim_policy = "Retain"
  }
 }
 # resource "kubernetes_persistent_volume_claim" "grafana_pvc" {

@@ -22,12 +22,12 @@ resource "kubernetes_persistent_volume_claim" "prometheus_server_pvc" {
   }
 }
 
-module "nfs_prometheus_backup" {
+module "nfs_prometheus_backup_host" {
   source    = "../../../../modules/kubernetes/nfs_volume"
-  name      = "monitoring-prometheus-backup"
+  name      = "monitoring-prometheus-backup-host"
   namespace = kubernetes_namespace.monitoring.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/prometheus-backup"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/prometheus-backup"
 }
 
 resource "helm_release" "prometheus" {

@@ -1725,21 +1725,21 @@ serverFiles:
           summary: "Bank sync has not succeeded in more than 48h. Check CronJob and account auth."
       - alert: EmailRoundtripFailing
         expr: email_roundtrip_success{job="email-roundtrip-monitor"} == 0
         for: 90m
         labels:
           severity: warning
         annotations:
-          summary: "Email round-trip probe failing. Check Mailgun relay, DNS, and IMAP."
-      - alert: EmailRoundtripStale
-        expr: (time() - email_roundtrip_last_success_timestamp{job="email-roundtrip-monitor"}) > 5400
-        for: 30m
-        labels:
-          severity: warning
-        annotations:
-          summary: "Email round-trip probe has not succeeded in >90 min"
+          summary: "Email round-trip probe failing. Check ForwardEmail relay, DNS, and IMAP."
+      - alert: EmailRoundtripStale
+        expr: (time() - email_roundtrip_last_success_timestamp{job="email-roundtrip-monitor"}) > 2400
+        for: 10m
+        labels:
+          severity: warning
+        annotations:
+          summary: "Email round-trip probe has not succeeded in >40 min"
       - alert: EmailRoundtripNeverRun
         expr: absent(email_roundtrip_success{job="email-roundtrip-monitor"})
-        for: 2h
+        for: 40m
         labels:
          severity: warning
        annotations:

@@ -47,14 +47,6 @@ resource "kubernetes_manifest" "external_secret" {
   depends_on = [kubernetes_namespace.n8n]
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "n8n-data"
-  namespace  = kubernetes_namespace.n8n.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/n8n"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -58,14 +58,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "navidrome-data"
-  namespace  = kubernetes_namespace.navidrome.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/navidrome"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -96,20 +88,20 @@ module "nfs_music" {
   nfs_path = "/volume1/music"
 }
 
-module "nfs_lidarr" {
+module "nfs_lidarr_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "navidrome-lidarr"
+  name      = "navidrome-lidarr-host"
   namespace = kubernetes_namespace.navidrome.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/servarr/lidarr"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/servarr/lidarr"
 }
 
-module "nfs_freedify" {
+module "nfs_freedify_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "navidrome-freedify"
+  name      = "navidrome-freedify-host"
   namespace = kubernetes_namespace.navidrome.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/freedify-music"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/freedify-music"
 }
 
 resource "kubernetes_deployment" "navidrome" {

@@ -194,13 +186,13 @@ resource "kubernetes_deployment" "navidrome" {
        volume {
          name = "lidarr"
          persistent_volume_claim {
-           claim_name = module.nfs_lidarr.claim_name
+           claim_name = module.nfs_lidarr_host.claim_name
          }
        }
        volume {
          name = "freedify"
          persistent_volume_claim {
-           claim_name = module.nfs_freedify.claim_name
+           claim_name = module.nfs_freedify_host.claim_name
          }
        }
      }

@@ -205,12 +205,12 @@ resource "kubernetes_persistent_volume_claim" "nextcloud_data_iscsi" {
   }
 }
 
-module "nfs_nextcloud_backup" {
+module "nfs_nextcloud_backup_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "nextcloud-backup"
+  name      = "nextcloud-backup-host"
   namespace = kubernetes_namespace.nextcloud.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/nextcloud-backup"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/nextcloud-backup"
 }
 
 module "ingress" {

@@ -516,7 +516,7 @@ resource "kubernetes_cron_job_v1" "nextcloud-backup" {
          volume {
            name = "backup"
            persistent_volume_claim {
-             claim_name = module.nfs_nextcloud_backup.claim_name
+             claim_name = module.nfs_nextcloud_backup_host.claim_name
            }
          }
 

@@ -87,7 +87,7 @@ resource "kubernetes_storage_class" "nfs_truenas" {
   ]
 
   parameters = {
-    server = var.nfs_server
-    share  = "/mnt/main"
+    server = "192.168.1.127"
+    share  = "/srv/nfs"
   }
 }
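
Dynamically provisioned volumes get the same repoint once, via the StorageClass parameters in the hunk above; the `nfs-truenas` name is kept so existing references stay valid even though new volumes now land on the Proxmox export. A sketch of the resulting resource, assuming the csi-driver-nfs provisioner (`nfs.csi.k8s.io` is the driver name the alertmanager PV uses earlier in this diff; the provisioner attribute here is an assumption):

resource "kubernetes_storage_class" "nfs_truenas" {
  metadata {
    name = "nfs-truenas" # legacy name kept for compatibility with existing PVCs
  }
  storage_provisioner = "nfs.csi.k8s.io" # assumption: kubernetes-csi/csi-driver-nfs
  parameters = {
    server = "192.168.1.127" # Proxmox host, not TrueNAS, despite the class name
    share  = "/srv/nfs"
  }
}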
@@ -20,14 +20,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "ntfy-data"
-  namespace  = kubernetes_namespace.ntfy.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/ntfy"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -60,20 +60,12 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_ollama_data" {
+module "nfs_ollama_data_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ollama-data"
+  name      = "ollama-data-host"
   namespace = kubernetes_namespace.ollama.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/ssd/ollama"
-}
-
-module "nfs_ollama_ui_data" {
-  source    = "../../modules/kubernetes/nfs_volume"
-  name      = "ollama-ui-data"
-  namespace = kubernetes_namespace.ollama.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/ollama"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs-ssd/ollama"
 }
 
 resource "kubernetes_persistent_volume_claim" "ollama_ui_data_proxmox" {

@@ -147,7 +139,7 @@ resource "kubernetes_deployment" "ollama" {
          effect = "NoSchedule"
        }
        container {
-         image = "ollama/ollama:0.6.10"
+         image = "ollama/ollama:0.6.8"
          name  = "ollama"
          env {
            name = "OLLAMA_HOST"

@@ -183,7 +175,7 @@ resource "kubernetes_deployment" "ollama" {
        volume {
          name = "ollama-data"
          persistent_volume_claim {
-           claim_name = module.nfs_ollama_data.claim_name
+           claim_name = module.nfs_ollama_data_host.claim_name
          }
        }
      }

@@ -88,14 +88,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "onlyoffice-data"
-  namespace  = kubernetes_namespace.onlyoffice.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/onlyoffice"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -264,20 +264,12 @@ resource "random_password" "gateway_token" {
   special = false
 }
 
-module "nfs_tools" {
+module "nfs_tools_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "openclaw-tools"
+  name      = "openclaw-tools-host"
   namespace = kubernetes_namespace.openclaw.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/openclaw/tools"
-}
-
-module "nfs_openclaw_home" {
-  source    = "../../modules/kubernetes/nfs_volume"
-  name      = "openclaw-home"
-  namespace = kubernetes_namespace.openclaw.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/openclaw/home"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/openclaw/tools"
 }
 
 resource "kubernetes_persistent_volume_claim" "home_proxmox" {

@@ -302,20 +294,12 @@ resource "kubernetes_persistent_volume_claim" "home_proxmox" {
   }
 }
 
-module "nfs_workspace" {
+module "nfs_workspace_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "openclaw-workspace"
+  name      = "openclaw-workspace-host"
   namespace = kubernetes_namespace.openclaw.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/openclaw/workspace"
-}
-
-module "nfs_data" {
-  source    = "../../modules/kubernetes/nfs_volume"
-  name      = "openclaw-data"
-  namespace = kubernetes_namespace.openclaw.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/openclaw/data"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/openclaw/workspace"
 }
 
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {

@@ -594,7 +578,7 @@ resource "kubernetes_deployment" "openclaw" {
        volume {
          name = "tools"
          persistent_volume_claim {
-           claim_name = module.nfs_tools.claim_name
+           claim_name = module.nfs_tools_host.claim_name
          }
        }
        volume {

@@ -606,7 +590,7 @@ resource "kubernetes_deployment" "openclaw" {
        volume {
          name = "workspace"
          persistent_volume_claim {
-           claim_name = module.nfs_workspace.claim_name
+           claim_name = module.nfs_workspace_host.claim_name
          }
        }
        volume {

@@ -1064,14 +1048,6 @@ resource "kubernetes_cron_job_v1" "task_processor" {
 
 # --- OpenLobster: Multi-user Telegram AI assistant (trial) ---
 
-module "nfs_openlobster_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "openlobster-data"
-  namespace  = kubernetes_namespace.openclaw.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/openclaw/openlobster-data"
-}
-
 resource "kubernetes_persistent_volume_claim" "openlobster_data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -32,20 +32,20 @@ resource "kubernetes_resource_quota_v1" "osm_routing" {
   }
 }
 
-module "nfs_osrm_data" {
+module "nfs_osrm_data_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "osm-routing-osrm-data"
+  name      = "osm-routing-osrm-data-host"
   namespace = kubernetes_namespace.osm-routing.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/osm-routing/osrm-data"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/osm-routing/osrm"
 }
 
-module "nfs_otp_data" {
+module "nfs_otp_data_host" {
   source    = "../../modules/kubernetes/nfs_volume"
-  name      = "osm-routing-otp-data"
+  name      = "osm-routing-otp-data-host"
   namespace = kubernetes_namespace.osm-routing.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/osm-routing/otp-data"
+  nfs_server = "192.168.1.127"
+  nfs_path   = "/srv/nfs/osm-routing/otp"
 }
 
 # --- OSRM Foot ---

@@ -102,7 +102,7 @@ resource "kubernetes_deployment" "osrm-foot" {
        volume {
          name = "osrm-data"
          persistent_volume_claim {
-           claim_name = module.nfs_osrm_data.claim_name
+           claim_name = module.nfs_osrm_data_host.claim_name
          }
        }
      }

@@ -183,7 +183,7 @@ resource "kubernetes_deployment" "osrm-bicycle" {
        volume {
          name = "osrm-data"
          persistent_volume_claim {
-           claim_name = module.nfs_osrm_data.claim_name
+           claim_name = module.nfs_osrm_data_host.claim_name
          }
        }
      }

@@ -268,7 +268,7 @@ resource "kubernetes_deployment" "otp" {
        volume {
          name = "otp-data"
          persistent_volume_claim {
-           claim_name = module.nfs_otp_data.claim_name
+           claim_name = module.nfs_otp_data_host.claim_name
          }
        }
      }

@@ -81,14 +81,6 @@ resource "kubernetes_secret" "basic_auth" {
   }
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "owntracks-data"
-  namespace  = kubernetes_namespace.owntracks.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/owntracks"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -60,14 +60,6 @@ module "tls_secret" {
   tls_secret_name = var.tls_secret_name
 }
 
-module "nfs_data" {
-  source     = "../../modules/kubernetes/nfs_volume"
-  name       = "paperless-ngx-data"
-  namespace  = kubernetes_namespace.paperless-ngx.metadata[0].name
-  nfs_server = var.nfs_server
-  nfs_path   = "/mnt/main/paperless-ngx"
-}
-
 resource "kubernetes_persistent_volume_claim" "data_proxmox" {
   wait_until_bound = false
   metadata {

@@ -1,6 +1,6 @@
 # Generated by Terragrunt. Sig: nIlQXj57tbuaRZEa
 terraform {
   backend "local" {
-    path = "/woodpecker/src/github.com/ViktorBarzin/infra/state/stacks/platform/terraform.tfstate"
+    path = "/Users/viktorbarzin/code/infra/state/stacks/platform/terraform.tfstate"
   }
 }

@@ -1,88 +0,0 @@
-variable "tls_secret_name" {}
-variable "secret_key" {}
-variable "postgres_password" {}
-variable "tier" { type = string }
-variable "redis_host" { type = string }
-variable "homepage_token" {
-  type      = string
-  default   = ""
-  sensitive = true
-}
-
-
-module "tls_secret" {
-  source          = "../../../../modules/kubernetes/setup_tls_secret"
-  namespace       = kubernetes_namespace.authentik.metadata[0].name
-  tls_secret_name = var.tls_secret_name
-}
-
-resource "kubernetes_namespace" "authentik" {
-  metadata {
-    name = "authentik"
-    labels = {
-      tier = var.tier
-      "resource-governance/custom-quota" = "true"
-    }
-  }
-}
-
-resource "kubernetes_resource_quota" "authentik" {
-  metadata {
-    name      = "authentik-quota"
-    namespace = kubernetes_namespace.authentik.metadata[0].name
-  }
-  spec {
-    hard = {
-      "requests.cpu"    = "16"
-      "requests.memory" = "16Gi"
-      "limits.memory"   = "96Gi"
-      pods              = "50"
-    }
-  }
-}
-
-resource "helm_release" "authentik" {
-  namespace        = kubernetes_namespace.authentik.metadata[0].name
-  create_namespace = true
-  name             = "goauthentik"
-
-  repository = "https://charts.goauthentik.io/"
-  chart      = "authentik"
-  # version = "2025.8.1"
-  version = "2025.10.3"
-  atomic  = true
-  timeout = 6000
-
-  values = [templatefile("${path.module}/values.yaml", { postgres_password = var.postgres_password, secret_key = var.secret_key, redis_host = var.redis_host })]
-}
-
-
-module "ingress" {
-  source          = "../../../../modules/kubernetes/ingress_factory"
-  namespace       = kubernetes_namespace.authentik.metadata[0].name
-  name            = "authentik"
-  service_name    = "goauthentik-server"
-  tls_secret_name = var.tls_secret_name
-  extra_annotations = {
-    "gethomepage.dev/enabled"      = "true"
-    "gethomepage.dev/name"         = "Authentik"
-    "gethomepage.dev/description"  = "Identity provider"
-    "gethomepage.dev/icon"         = "authentik.png"
-    "gethomepage.dev/group"        = "Identity & Security"
-    "gethomepage.dev/pod-selector" = ""
-    "gethomepage.dev/widget.type"  = "authentik"
-    "gethomepage.dev/widget.url"   = "http://goauthentik-server.authentik.svc.cluster.local"
-    "gethomepage.dev/widget.key"   = var.homepage_token
-  }
-}
-
-module "ingress-outpost" {
-  source          = "../../../../modules/kubernetes/ingress_factory"
-  namespace       = kubernetes_namespace.authentik.metadata[0].name
-  name            = "authentik-outpost"
-  host            = "authentik"
-  service_name    = "ak-outpost-authentik-embedded-outpost"
-  port            = 9000
-  ingress_path    = ["/outpost.goauthentik.io"]
-  tls_secret_name = var.tls_secret_name
-}

@@ -1,14 +0,0 @@
-[databases]
-authentik = host=postgresql.dbaas port=5432 dbname=authentik user=authentik password=${password}
-
-[pgbouncer]
-listen_addr = 0.0.0.0
-listen_port = 6432
-auth_type = md5
-auth_file = /etc/pgbouncer/userlist.txt
-pool_mode = transaction
-max_client_conn = 200
-default_pool_size = 20
-reserve_pool_size = 5
-reserve_pool_timeout = 5
-ignore_startup_parameters = extra_float_digits

@@ -1,140 +0,0 @@
-resource "kubernetes_config_map" "pgbouncer_config" {
-  metadata {
-    name      = "pgbouncer-config"
-    namespace = "authentik"
-  }
-
-  data = {
-    "pgbouncer.ini" = templatefile("${path.module}/pgbouncer.ini", { password = var.postgres_password })
-  }
-}
-
-# --- 2️⃣ Secret for user credentials ---
-resource "kubernetes_secret" "pgbouncer_auth" {
-  metadata {
-    name      = "pgbouncer-auth"
-    namespace = "authentik"
-  }
-
-  data = {
-    "userlist.txt" = templatefile("${path.module}/userlist.txt", { password = var.postgres_password })
-  }
-
-  type = "Opaque"
-}
-
-# --- 3️⃣ Deployment ---
-resource "kubernetes_deployment" "pgbouncer" {
-  metadata {
-    name      = "pgbouncer"
-    namespace = "authentik"
-    labels = {
-      app  = "pgbouncer"
-      tier = var.tier
-    }
-  }
-
-  spec {
-    replicas = 3
-
-    selector {
-      match_labels = {
-        app = "pgbouncer"
-      }
-    }
-
-    template {
-      metadata {
-        labels = {
-          app = "pgbouncer"
-        }
-      }
-
-      spec {
-        affinity {
-          pod_anti_affinity {
-            required_during_scheduling_ignored_during_execution {
-              label_selector {
-                match_expressions {
-                  key      = "component"
-                  operator = "In"
-                  values   = ["server"]
-                }
-              }
-              topology_key = "kubernetes.io/hostname"
-            }
-          }
-        }
-        container {
-          name              = "pgbouncer"
-          image             = "edoburu/pgbouncer:latest"
-          image_pull_policy = "IfNotPresent"
-
-          port {
-            container_port = 6432
-          }
-
-          volume_mount {
-            name       = "config"
-            mount_path = "/etc/pgbouncer/pgbouncer.ini"
-            sub_path   = "pgbouncer.ini"
-          }
-
-          volume_mount {
-            name       = "auth"
-            mount_path = "/etc/pgbouncer/userlist.txt"
-            sub_path   = "userlist.txt"
-          }
-
-          env {
-            name  = "DATABASES_AUTHENTIK"
-            value = "host=postgres port=5432 dbname=authentik user=authentik password=${var.postgres_password}"
-          }
-        }
-
-        volume {
-          name = "config"
-          config_map {
-            name = kubernetes_config_map.pgbouncer_config.metadata[0].name
-          }
-        }
-
-        volume {
-          name = "auth"
-          secret {
-            secret_name = kubernetes_secret.pgbouncer_auth.metadata[0].name
-          }
-        }
-        dns_config {
-          option {
-            name  = "ndots"
-            value = "2"
-          }
-        }
-      }
-    }
-  }
-  depends_on = [kubernetes_secret.pgbouncer_auth]
-}
-
-# --- 4️⃣ Service ---
-resource "kubernetes_service" "pgbouncer" {
-  metadata {
-    name      = "pgbouncer"
-    namespace = "authentik"
-  }
-
-  spec {
-    selector = {
-      app = "pgbouncer"
-    }
-
-    port {
-      port        = 6432
-      target_port = 6432
-      protocol    = "TCP"
-    }
-
-    type = "ClusterIP"
-  }
-}

@@ -1 +0,0 @@
-"authentik" "${password}"

@@ -1,73 +0,0 @@
-authentik:
-  log_level: warning
-  # log_level: trace
-  secret_key: "${secret_key}"
-  # This sends anonymous usage-data, stack traces on errors and
-  # performance data to authentik.error-reporting.a7k.io, and is fully opt-in
-  error_reporting:
-    enabled: true
-  postgresql:
-    # host: postgresql.dbaas
-    host: pgbouncer.authentik
-    port: 6432
-    user: authentik
-    password: ${postgres_password}
-  redis:
-    host: ${redis_host}
-
-server:
-  replicas: 3
-  strategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxSurge: 0
-      maxUnavailable: 1
-  resources:
-    requests:
-      cpu: 100m
-      memory: 1Gi
-    limits:
-      memory: 1Gi
-  topologySpreadConstraints:
-    - maxSkew: 1
-      topologyKey: kubernetes.io/hostname
-      whenUnsatisfiable: ScheduleAnyway
-      labelSelector:
-        matchLabels:
-          app.kubernetes.io/component: server
-  ingress:
-    enabled: false
-    # hosts:
-    #   - authentik.viktorbarzin.me
-  podAnnotations:
-    diun.enable: true
-    diun.include_tags: "^202[0-9].[0-9]+.*$" # no need to annotate the worker as it uses the same image
-  pdb:
-    enabled: true
-    minAvailable: 2
-global:
-  addPrometheusAnnotations: true
-
-worker:
-  replicas: 3
-  strategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxSurge: 0
-      maxUnavailable: 1
-  resources:
-    requests:
-      cpu: 100m
-      memory: 896Mi
-    limits:
-      memory: 896Mi
-  topologySpreadConstraints:
-    - maxSkew: 1
-      topologyKey: kubernetes.io/hostname
-      whenUnsatisfiable: ScheduleAnyway
-      labelSelector:
-        matchLabels:
-          app.kubernetes.io/component: worker
-  pdb:
-    enabled: true
-    maxUnavailable: 1

@ -1,173 +0,0 @@
# Contents for cloudflare account
variable "cloudflare_api_key" {}
variable "cloudflare_email" {}
variable "cloudflare_proxied_names" { type = list(string) }
variable "cloudflare_non_proxied_names" { type = list(string) }
variable "cloudflare_zone_id" {
  description = "Zone ID for your domain"
  type        = string
}
variable "cloudflare_account_id" {
  type      = string
  sensitive = true
}
variable "cloudflare_tunnel_id" {
  type      = string
  sensitive = true
}
variable "public_ip" {
  type = string
}
variable "public_ipv6" {
  type        = string
  description = "Public IPv6 address for AAAA records (from HE tunnel broker)"
}

terraform {
  required_providers {
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "~> 4"
    }
  }
}
provider "cloudflare" {
  api_key = var.cloudflare_api_key # I gave up on getting the permissions on the token...
  email   = var.cloudflare_email
}

locals {
  cloudflare_proxied_names_map = {
    for h in var.cloudflare_proxied_names :
    h => h
  }
  cloudflare_non_proxied_names_map = {
    for h in var.cloudflare_non_proxied_names :
    h => h
  }
}

resource "cloudflare_zero_trust_tunnel_cloudflared_config" "sof" {
  account_id = var.cloudflare_account_id
  tunnel_id  = var.cloudflare_tunnel_id

  config {
    warp_routing {
      enabled = true
    }
    dynamic "ingress_rule" {
      for_each = toset(var.cloudflare_proxied_names)
      content {
        hostname = ingress_rule.value == "viktorbarzin.me" ? ingress_rule.value : "${ingress_rule.value}.viktorbarzin.me"
        path     = "/"
        service  = "https://10.0.20.200:443"
        origin_request {
          no_tls_verify = true
        }
      }
    }
    ingress_rule {
      service = "http_status:404"
    }
  }
}

resource "cloudflare_record" "dns_record" {
  # count = length(var.cloudflare_proxied_names)
  # name  = var.cloudflare_proxied_names[count.index]
  for_each = local.cloudflare_proxied_names_map
  name     = each.key

  content = "${var.cloudflare_tunnel_id}.cfargotunnel.com"
  proxied = true
  ttl     = 1
  type    = "CNAME"
  zone_id = var.cloudflare_zone_id
}

resource "cloudflare_record" "non_proxied_dns_record" {
  # count = length(var.cloudflare_non_proxied_names)
  # name  = var.cloudflare_non_proxied_names[count.index]
  for_each = local.cloudflare_non_proxied_names_map
  name     = each.key

  # content = var.non_proxied_names[count.index].ip
  content = var.public_ip
  proxied = false
  ttl     = 1
  type    = "A"
  zone_id = var.cloudflare_zone_id
}

resource "cloudflare_record" "non_proxied_dns_record_ipv6" {
  for_each = local.cloudflare_non_proxied_names_map
  name     = each.key
  content  = var.public_ipv6
  proxied  = false
  ttl      = 1
  type     = "AAAA"
  zone_id  = var.cloudflare_zone_id
}

resource "cloudflare_record" "mail" {
  content  = "mail.viktorbarzin.me"
  name     = "viktorbarzin.me"
  proxied  = false
  ttl      = 1
  type     = "MX"
  priority = 1
  zone_id  = var.cloudflare_zone_id
}

resource "cloudflare_record" "mail_domainkey" {
  content  = "\"k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDIDLB8mhAHNqs1s6GeZMQHOxWweoNKIrqo5tqRM3yFilgfPUX34aTIXNZg9xAmlK+2S/xXO1ymt127ZGMjnoFKOEP8/uZ54iHTCnioHaPZWMfJ7o6TYIXjr+9ShKfoJxZLv7lHJ2wKQK3yOw4lg4cvja5nxQ6fNoGRwo+mQ/mgJQIDAQAB\""
  name     = "s1._domainkey.viktorbarzin.me"
  proxied  = false
  ttl      = 1
  type     = "TXT"
  priority = 1
  zone_id  = var.cloudflare_zone_id
}

resource "cloudflare_record" "mail_spf" {
  content  = "\"v=spf1 include:mailgun.org ~all\""
  name     = "viktorbarzin.me"
  proxied  = false
  ttl      = 1
  type     = "TXT"
  priority = 1
  zone_id  = var.cloudflare_zone_id
}

resource "cloudflare_record" "mail_dmarc" {
  content  = "\"v=DMARC1; p=quarantine; pct=100; fo=1; ri=3600; sp=quarantine; adkim=r; aspf=r; rua=mailto:e21c0ff8@dmarc.mailgun.org,mailto:adb84997@inbox.ondmarc.com; ruf=mailto:e21c0ff8@dmarc.mailgun.org,mailto:adb84997@inbox.ondmarc.com,mailto:postmaster@viktorbarzin.me;\""
  name     = "_dmarc.viktorbarzin.me"
  proxied  = false
  ttl      = 1
  type     = "TXT"
  priority = 1
  zone_id  = var.cloudflare_zone_id
}

resource "cloudflare_record" "keyserver" {
  content  = "130.162.165.220" # Oracle VPS
  name     = "keyserver.viktorbarzin.me"
  proxied  = false
  ttl      = 3600
  type     = "A"
  priority = 1
  zone_id  = var.cloudflare_zone_id
}

# Enable HTTP/3 (QUIC) for Cloudflare-proxied domains
resource "cloudflare_zone_settings_override" "http3" {
  zone_id = var.cloudflare_zone_id

  settings {
    http3 = "on"
  }
}

@ -1,134 +0,0 @@
# Contents for cloudflare tunnel

variable "tls_secret_name" {}
variable "cloudflare_tunnel_token" {}
resource "kubernetes_namespace" "cloudflared" {
  metadata {
    name = "cloudflared"
    labels = {
      tier = var.tier
    }
  }
}
variable "tier" { type = string }

module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.cloudflared.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_deployment" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = kubernetes_namespace.cloudflared.metadata[0].name
    labels = {
      app  = "cloudflared"
      tier = var.tier
    }
    annotations = {
      "reloader.stakater.com/search" = "true"
    }
  }
  spec {
    replicas = 3
    strategy {
      type = "RollingUpdate"
    }
    selector {
      match_labels = {
        app = "cloudflared"
      }
    }
    template {
      metadata {
        labels = {
          app = "cloudflared"
        }
        annotations = {
          "diun.enable"       = "true"
          "diun.include_tags" = "^\\d{4}\\.\\d+\\.\\d+$"
        }
      }
      spec {
        topology_spread_constraint {
          max_skew           = 1
          topology_key       = "kubernetes.io/hostname"
          when_unsatisfiable = "ScheduleAnyway"
          label_selector {
            match_labels = {
              app = "cloudflared"
            }
          }
        }
        container {
          # image = "wisdomsky/cloudflared-web:latest"
          image   = "cloudflare/cloudflared:2026.3.0"
          name    = "cloudflared"
          command = ["cloudflared", "tunnel", "run"]
          env {
            name  = "TUNNEL_TOKEN"
            value = var.cloudflare_tunnel_token
          }

          port {
            container_port = 14333
          }
          resources {
            requests = {
              cpu    = "15m"
              memory = "128Mi"
            }
            limits = {
              memory = "128Mi"
            }
          }
        }
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}

resource "kubernetes_pod_disruption_budget_v1" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = kubernetes_namespace.cloudflared.metadata[0].name
  }
  spec {
    max_unavailable = "1"
    selector {
      match_labels = {
        app = "cloudflared"
      }
    }
  }
}

resource "kubernetes_service" "cloudflared" {
  metadata {
    name      = "cloudflared"
    namespace = kubernetes_namespace.cloudflared.metadata[0].name
    labels = {
      "app" = "cloudflared"
    }
  }

  spec {
    selector = {
      app = "cloudflared"
    }
    port {
      name        = "http"
      target_port = 14333
      port        = 80
      protocol    = "TCP"
    }
  }
}

@ -1,53 +0,0 @@
variable "tier" { type = string }

# -----------------------------------------------------------------------------
# Namespace
# -----------------------------------------------------------------------------
resource "kubernetes_namespace" "cnpg_system" {
  metadata {
    name = "cnpg-system"
    labels = {
      tier = var.tier
    }
  }
}

# -----------------------------------------------------------------------------
# CloudNativePG Operator — manages PostgreSQL clusters via CRDs
# https://cloudnative-pg.io/
# -----------------------------------------------------------------------------
resource "helm_release" "cnpg" {
  namespace        = kubernetes_namespace.cnpg_system.metadata[0].name
  create_namespace = false
  name             = "cnpg"
  atomic           = true
  timeout          = 300

  repository = "https://cloudnative-pg.github.io/charts"
  chart      = "cloudnative-pg"
  version    = "0.27.1"

  values = [yamlencode({
    crds = {
      create = true
    }

    replicaCount = 1

    resources = {
      requests = {
        cpu    = "100m"
        memory = "256Mi"
      }
      limits = {
        memory = "256Mi"
      }
    }
  })]
}

# NOTE: local-path-provisioner is already installed in the cluster
# (via cloud-init template) with StorageClass "local-path" (default).
# ReclaimPolicy is "Delete" — for CNPG clusters, set
# .spec.storage.pvcTemplate.storageClassName = "local-path" in the
# Cluster CR. CNPG handles PVC lifecycle independently.

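As a concrete illustration of that NOTE — a minimal sketch only, since no Cluster CR ships in this module; the cluster name, namespace, and size are hypothetical placeholders — the pvcTemplate pinning could be expressed with the kubernetes_manifest resource:

resource "kubernetes_manifest" "example_pg_cluster" {
  manifest = {
    apiVersion = "postgresql.cnpg.io/v1"
    kind       = "Cluster"
    metadata = {
      name      = "example-pg" # hypothetical name
      namespace = "cnpg-system"
    }
    spec = {
      instances = 1
      storage = {
        pvcTemplate = {
          # Pin PVCs to the local-path StorageClass mentioned in the NOTE above
          storageClassName = "local-path"
          accessModes      = ["ReadWriteOnce"]
          resources = {
            requests = { storage = "10Gi" } # illustrative size
          }
        }
      }
    }
  }
}
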
@ -1,44 +0,0 @@
controller:
  extraVolumes:
    - name: crowdsec-bouncer-plugin
      emptyDir: {}
  extraInitContainers:
    - name: init-clone-crowdsec-bouncer
      image: crowdsecurity/lua-bouncer-plugin
      imagePullPolicy: IfNotPresent
      env:
        - name: API_URL
          value: "http://crowdsec-service.crowdsec.svc.cluster.local:8080" # crowdsec lapi service-name
        - name: API_KEY
          value: "<API KEY>" # generated with `cscli bouncers add -n <bouncer_name>`
        - name: BOUNCER_CONFIG
          value: "/crowdsec/crowdsec-bouncer.conf"
        - name: CAPTCHA_PROVIDER
          value: "recaptcha" # valid providers are recaptcha, hcaptcha, turnstile
        - name: SECRET_KEY
          value: "<your-captcha-secret-key>" # If you want captcha support, otherwise remove this ENV VAR
        - name: SITE_KEY
          value: "<your-captcha-site-key>" # If you want captcha support, otherwise remove this ENV VAR
        - name: BAN_TEMPLATE_PATH
          value: /etc/nginx/lua/plugins/crowdsec/templates/ban.html
        - name: CAPTCHA_TEMPLATE_PATH
          value: /etc/nginx/lua/plugins/crowdsec/templates/captcha.html
      command:
        [
          "sh",
          "-c",
          "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/",
        ]
      volumeMounts:
        - name: crowdsec-bouncer-plugin
          mountPath: /lua_plugins
  extraVolumeMounts:
    - name: crowdsec-bouncer-plugin
      mountPath: /etc/nginx/lua/plugins/crowdsec
      subPath: crowdsec
  config:
    plugins: "crowdsec"
    lua-shared-dicts: "crowdsec_cache: 50m"
    server-snippet: |
      lua_ssl_trusted_certificate "/etc/ssl/certs/ca-certificates.crt"; # If you want captcha support, otherwise remove this line
      resolver local=on ipv6=off;

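A values file like the one above is consumed by the ingress-nginx chart. A minimal sketch of how it could be wired in — assumed, not from this commit; the release name, file name, and variable are illustrative, following the templatefile pattern used elsewhere in this repo:

resource "helm_release" "ingress_nginx" {
  name       = "ingress-nginx" # hypothetical release name
  repository = "https://kubernetes.github.io/ingress-nginx"
  chart      = "ingress-nginx"
  namespace  = "ingress-nginx"

  # Render the bouncer values, injecting the real bouncer API key at plan time
  # instead of the literal "<API KEY>" placeholder shown above
  values = [templatefile("${path.module}/crowdsec-bouncer-values.yaml", {
    api_key = var.crowdsec_bouncer_api_key # hypothetical variable
  })]
}
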
@ -1,376 +0,0 @@
variable "tls_secret_name" {}
variable "homepage_username" {}
variable "homepage_password" {}
variable "db_password" {}
variable "enroll_key" {}
variable "crowdsec_dash_api_key" {
  type      = string
  sensitive = true
}
variable "crowdsec_dash_machine_id" { type = string } # used for web dash
variable "crowdsec_dash_machine_password" {
  type      = string
  sensitive = true
}
variable "tier" { type = string }
variable "slack_webhook_url" { type = string }
variable "mysql_host" { type = string }

module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.crowdsec.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_namespace" "crowdsec" {
  metadata {
    name = "crowdsec"
    labels = {
      tier                               = var.tier
      "resource-governance/custom-quota" = "true"
    }
  }
}

resource "kubernetes_config_map" "crowdsec_custom_scenarios" {
  metadata {
    name      = "crowdsec-custom-scenarios"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      "app.kubernetes.io/name" = "crowdsec"
    }
  }

  data = {
    "http-403-abuse.yaml" = <<-YAML
      type: leaky
      name: crowdsecurity/http-403-abuse
      description: "Detect IPs triggering too many HTTP 403s in NGINX ingress logs"
      filter: "evt.Meta.log_type == 'http_access-log' && evt.Parsed.status == '403'"
      groupby: "evt.Meta.source_ip"
      leakspeed: "2s"
      capacity: 10
      blackhole: 5m
      labels:
        service: http
        behavior: abusive_403
        remediation: true
    YAML
    "http-429-abuse.yaml" = <<-YAML
      type: leaky
      name: crowdsecurity/http-429-abuse
      description: "Detect IPs repeatedly triggering rate-limit (HTTP 429)"
      filter: "evt.Meta.log_type == 'http_access-log' && evt.Parsed.status == '429'"
      groupby: "evt.Meta.source_ip"
      leakspeed: "10s"
      capacity: 5
      blackhole: 1m
      labels:
        service: http
        behavior: rate_limit_abuse
        remediation: true
    YAML
  }
}

# Whitelist for trusted IPs that should never be blocked
resource "kubernetes_config_map" "crowdsec_whitelist" {
  metadata {
    name      = "crowdsec-whitelist"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      "app.kubernetes.io/name" = "crowdsec"
    }
  }

  data = {
    "whitelist.yaml" = <<-YAML
      name: crowdsecurity/whitelist-trusted-ips
      description: "Whitelist for trusted IPs that should never be blocked"
      whitelist:
        reason: "Trusted IP - never block"
        ip:
          - "176.12.22.76"
    YAML
  }
}

resource "helm_release" "crowdsec" {
  namespace        = kubernetes_namespace.crowdsec.metadata[0].name
  create_namespace = true
  name             = "crowdsec"
  atomic           = true
  version          = "0.21.0"

  repository = "https://crowdsecurity.github.io/helm-charts"
  chart      = "crowdsec"

  values        = [templatefile("${path.module}/values.yaml", { homepage_username = var.homepage_username, homepage_password = var.homepage_password, DB_PASSWORD = var.db_password, ENROLL_KEY = var.enroll_key, SLACK_WEBHOOK_URL = var.slack_webhook_url, mysql_host = var.mysql_host })]
  timeout       = 900
  wait          = true
  wait_for_jobs = true
}

# Deployment for my custom dashboard that helps me unblock myself when I blocklist myself
resource "kubernetes_deployment" "crowdsec-web" {
  metadata {
    name      = "crowdsec-web"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      app                             = "crowdsec_web"
      "kubernetes.io/cluster-service" = "true"
      tier                            = var.tier
    }
  }
  spec {
    replicas = 1
    strategy {
      type = "RollingUpdate"
    }
    selector {
      match_labels = {
        app = "crowdsec_web"
      }
    }
    template {
      metadata {
        labels = {
          app                             = "crowdsec_web"
          "kubernetes.io/cluster-service" = "true"
        }
      }
      spec {
        priority_class_name = "tier-1-cluster"
        container {
          name  = "crowdsec-web"
          image = "viktorbarzin/crowdsec_web"
          env {
            name  = "CS_API_URL"
            value = "http://crowdsec-service.crowdsec.svc.cluster.local:8080/v1"
          }
          env {
            name  = "CS_API_KEY"
            value = var.crowdsec_dash_api_key
          }
          env {
            name  = "CS_MACHINE_ID"
            value = var.crowdsec_dash_machine_id
          }
          env {
            name  = "CS_MACHINE_PASSWORD"
            value = var.crowdsec_dash_machine_password
          }
          port {
            name           = "http"
            container_port = 8000
            protocol       = "TCP"
          }
          resources {
            requests = {
              cpu    = "15m"
              memory = "128Mi"
            }
            limits = {
              memory = "128Mi"
            }
          }
        }
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "crowdsec-web" {
  metadata {
    name      = "crowdsec-web"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      "app" = "crowdsec_web"
    }
  }

  spec {
    selector = {
      app = "crowdsec_web"
    }
    port {
      port        = "80"
      target_port = "8000"
    }
  }
}
module "ingress" {
  source           = "../../../../modules/kubernetes/ingress_factory"
  namespace        = kubernetes_namespace.crowdsec.metadata[0].name
  name             = "crowdsec-web"
  protected        = true
  tls_secret_name  = var.tls_secret_name
  exclude_crowdsec = true
  rybbit_site_id   = "d09137795ccc"
}

# CronJob to import public blocklists into CrowdSec
# https://github.com/wolffcatskyy/crowdsec-blocklist-import
# Uses kubectl exec to run in an existing CrowdSec agent pod that's already registered
resource "kubernetes_cron_job_v1" "crowdsec_blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      app  = "crowdsec-blocklist-import"
      tier = var.tier
    }
  }

  spec {
    # Run daily at 4 AM
    schedule                      = "0 4 * * *"
    timezone                      = "Europe/London"
    concurrency_policy            = "Forbid"
    successful_jobs_history_limit = 3
    failed_jobs_history_limit     = 3

    job_template {
      metadata {
        labels = {
          app = "crowdsec-blocklist-import"
        }
      }

      spec {
        backoff_limit = 3
        template {
          metadata {
            labels = {
              app = "crowdsec-blocklist-import"
            }
          }

          spec {
            service_account_name = kubernetes_service_account.blocklist_import.metadata[0].name
            restart_policy       = "OnFailure"

            container {
              name  = "blocklist-import"
              image = "bitnami/kubectl:latest"

              command = ["/bin/bash", "-c"]
              args = [
                <<-EOF
                  set -e

                  echo "Finding CrowdSec agent pod..."
                  AGENT_POD=$(kubectl get pods -n crowdsec -l k8s-app=crowdsec,type=agent -o jsonpath='{.items[0].metadata.name}')

                  if [ -z "$AGENT_POD" ]; then
                    echo "ERROR: Could not find CrowdSec agent pod"
                    exit 1
                  fi

                  echo "Using agent pod: $AGENT_POD"

                  # Download the import script
                  echo "Downloading blocklist import script..."
                  curl -fsSL -o /tmp/import.sh \
                    https://raw.githubusercontent.com/wolffcatskyy/crowdsec-blocklist-import/main/import.sh
                  chmod +x /tmp/import.sh

                  # Copy script to agent pod and execute
                  echo "Copying script to agent pod and executing..."
                  kubectl cp /tmp/import.sh crowdsec/$AGENT_POD:/tmp/import.sh

                  kubectl exec -n crowdsec "$AGENT_POD" -- /bin/bash -c '
                    set -e

                    # Run with native mode since we are inside the CrowdSec container
                    export MODE=native
                    export DECISION_DURATION=24h
                    export FETCH_TIMEOUT=60
                    export LOG_LEVEL=INFO

                    /tmp/import.sh

                    # Cleanup
                    rm -f /tmp/import.sh
                  '

                  echo "Blocklist import completed successfully!"
                EOF
              ]
            }
          }
        }
      }
    }
  }
}

# Service account for the blocklist import job (needs kubectl exec permissions)
resource "kubernetes_service_account" "blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
}

resource "kubernetes_role" "blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }

  rule {
    api_groups = [""]
    resources  = ["pods"]
    verbs      = ["get", "list"]
  }
  rule {
    api_groups = [""]
    resources  = ["pods/exec"]
    verbs      = ["create"]
  }
}

resource "kubernetes_role_binding" "blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "Role"
    name      = kubernetes_role.blocklist_import.metadata[0].name
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.blocklist_import.metadata[0].name
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
}

# Custom ResourceQuota for CrowdSec — needs more than default 1-cluster quota
# because it runs DaemonSet agents (1 per worker node) + 3 LAPI replicas + web UI
resource "kubernetes_resource_quota" "crowdsec" {
  metadata {
    name      = "crowdsec-quota"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
  spec {
    hard = {
      "requests.cpu"    = "4"
      "requests.memory" = "8Gi"
      "limits.memory"   = "16Gi"
      pods              = "30"
    }
  }
}

@ -1,229 +0,0 @@
# values from - https://github.com/crowdsecurity/helm-charts/blob/main/charts/crowdsec/values.yaml
container_runtime: containerd

agent:
  resources:
    requests:
      cpu: 25m
      memory: 64Mi
    limits:
      memory: 512Mi
  priorityClassName: "tier-1-cluster"
  # To specify each pod whose logs you want to process (pods present on the node)
  acquisition:
    # The namespace where the pod is located
    - namespace: traefik
      # The pod name
      podName: traefik-*
      # as in crowdsec configuration, we need to specify the program name so the parser will match and parse logs
      program: traefik
  # Those are ENV variables
  env:
    # As it's a test, we don't want to share signals with CrowdSec so disable the Online API.
    # - name: DISABLE_ONLINE_API
    #   value: "true"
    # As we are running Traefik, we want to install the Traefik collection
    - name: COLLECTIONS
      value: "crowdsecurity/traefik crowdsecurity/base-http-scenarios crowdsecurity/http-cve"
    - name: SCENARIOS
      value: ""
      # value: "crowdsecurity/http-crawl-aggressive"
  # Mount custom scenarios into /etc/crowdsec/scenarios
  extraVolumeMounts:
    - name: custom-scenarios
      mountPath: /etc/crowdsec/scenarios/http-403-abuse.yaml
      subPath: "http-403-abuse.yaml"
      readOnly: true
    - name: custom-scenarios
      mountPath: /etc/crowdsec/scenarios/http-429-abuse.yaml
      subPath: "http-429-abuse.yaml"
      readOnly: true
    - name: whitelist
      mountPath: /etc/crowdsec/parsers/s02-enrich/whitelist.yaml
      subPath: "whitelist.yaml"
      readOnly: true
  extraVolumes:
    - name: custom-scenarios
      configMap:
        name: crowdsec-custom-scenarios
    - name: whitelist
      configMap:
        name: crowdsec-whitelist
  podAnnotations:
    dependency.kyverno.io/wait-for: "mysql.dbaas:3306"

lapi:
  resources:
    requests:
      cpu: 25m
      memory: 128Mi
    limits:
      memory: 1Gi
  startupProbe:
    httpGet:
      path: /health
      port: 8080
    failureThreshold: 30
    periodSeconds: 10
  priorityClassName: "tier-1-cluster"
  replicas: 3
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: crowdsec
          type: lapi
  pdb:
    enabled: true
    maxUnavailable: 1
  extraSecrets:
    dbPassword: "${DB_PASSWORD}"
  storeCAPICredentialsInSecret: true
  persistentVolume:
    config:
      enabled: false
    data:
      enabled: false
  env:
    - name: ENROLL_KEY
      value: "${ENROLL_KEY}"
    - name: ENROLL_INSTANCE_NAME
      value: "k8s-cluster"
    - name: ENROLL_TAGS
      value: "k8s linux"
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:
          name: crowdsec-lapi-secrets
          key: dbPassword
    # As it's a test, we don't want to share signals with CrowdSec, so disable the Online API.
    # - name: DISABLE_ONLINE_API
    #   value: "true"
  dashboard:
    enabled: true
    env:
      - name: MB_DB_TYPE
        value: "mysql"
      - name: MB_DB_DBNAME
        value: crowdsec-metabase
      - name: MB_DB_USER
        value: "crowdsec"
      - name: MB_DB_PASS
        value: "${DB_PASSWORD}"
      - name: MB_DB_HOST
        value: "${mysql_host}"

      - name: MB_EMAIL_SMTP_USERNAME
        value: "info@viktorbarzin.me"
      - name: MB_EMAIL_FROM_ADDRESS
        value: "info@viktorbarzin.me"
      - name: MB_EMAIL_SMTP_HOST
        value: "mailserver.mailserver.svc.cluster.local"
      - name: MB_EMAIL_SMTP_PASSWORD
        value: "" # Ignore for now as it's unclear what notifications we can get
      - name: MB_EMAIL_SMTP_PORT
        value: "587"
      - name: MB_EMAIL_SMTP_SECURITY
        value: "starttls"
    ingress:
      enabled: true
      annotations:
        nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
        #nginx.ingress.kubernetes.io/auth-url: "https://oauth2.viktorbarzin.me/oauth2/auth"
        nginx.ingress.kubernetes.io/auth-url: "http://ak-outpost-authentik-embedded-outpost.authentik.svc.cluster.local:9000/outpost.goauthentik.io/auth/nginx"
        # nginx.ingress.kubernetes.io/auth-signin: "https://oauth2.viktorbarzin.me/oauth2/start?rd=/redirect/$http_host$escaped_request_uri"
        nginx.ingress.kubernetes.io/auth-signin: "https://authentik.viktorbarzin.me/outpost.goauthentik.io/start?rd=$scheme%3A%2F%2F$host$escaped_request_uri"
        nginx.ingress.kubernetes.io/auth-response-headers: "Set-Cookie,X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid"
        nginx.ingress.kubernetes.io/auth-snippet: "proxy_set_header X-Forwarded-Host $http_host;"
        gethomepage.dev/enabled: "true"
        gethomepage.dev/description: "Web Application Firewall"
        gethomepage.dev/icon: "crowdsec.png"
        gethomepage.dev/name: "CrowdSec"
        gethomepage.dev/group: "Identity & Security"
        gethomepage.dev/widget.type: "crowdsec"
        gethomepage.dev/widget.url: "http://crowdsec-service.crowdsec.svc.cluster.local:8080"
        gethomepage.dev/widget.username: "${homepage_username}"
        gethomepage.dev/widget.password: "${homepage_password}"
        gethomepage.dev/pod-selector: ""
      ingressClassName: "nginx"
      host: "crowdsec.viktorbarzin.me"
      tls:
        - hosts:
            - crowdsec.viktorbarzin.me
          secretName: "tls-secret"
  metrics:
    enabled: true
  strategy:
    type: RollingUpdate

config:
  # Custom profiles: captcha for rate limiting, ban for attacks
  profiles.yaml: |
    # Captcha for rate limiting and 403 abuse (user can unblock themselves)
    name: captcha_remediation
    filters:
      - Alert.Remediation == true && Alert.GetScope() == "Ip" && Alert.GetScenario() in ["crowdsecurity/http-429-abuse", "crowdsecurity/http-403-abuse", "crowdsecurity/http-crawl-non_statics", "crowdsecurity/http-sensitive-files"]
    decisions:
      - type: captcha
        duration: 4h
    notifications:
      - slack_alerts
    on_success: break
    ---
    # Default: Ban for serious attacks (CVE exploits, scanners, brute force)
    name: default_ip_remediation
    filters:
      - Alert.Remediation == true && Alert.GetScope() == "Ip"
    decisions:
      - type: ban
        duration: 4h
    notifications:
      - slack_alerts
    on_success: break
    ---
    name: default_range_remediation
    filters:
      - Alert.Remediation == true && Alert.GetScope() == "Range"
    decisions:
      - type: ban
        duration: 4h
    notifications:
      - slack_alerts
    on_success: break

  config.yaml.local: |
    db_config:
      type: mysql
      user: crowdsec
      password: ${DB_PASSWORD}
      db_name: crowdsec
      host: ${mysql_host}
      port: 3306
    api:
      server:
        auto_registration: # Activate if not using TLS for authentication
          enabled: true
          token: "$${REGISTRATION_TOKEN}" # /!\ do not change
          allowed_ranges: # /!\ adapt to the pod IP ranges used by your cluster
            - "127.0.0.1/32"
            - "192.168.0.0/16"
            - "10.0.0.0/8"
            - "172.16.0.0/12"

  notifications:
    slack.yaml: |
      type: slack
      name: slack_alerts
      log_level: info
      format: |
        :rotating_light: *CrowdSec Alert*
        {{range .}}
        *Scenario:* {{.Alert.Scenario}}
        *Source IP:* {{.Alert.Source.IP}} ({{.Alert.Source.Cn}})
        *Decisions:*
        {{range .Alert.Decisions}} - {{.Type}} for {{.Duration}} (scope: {{.Scope}}, value: {{.Value}})
        {{end}}
        {{end}}
      webhook: ${SLACK_WEBHOOK_URL}

@ -1,16 +0,0 @@
tls:
  useSelfSigned: true
credentials:
  root:
    password: ${root_password}
    user: root
serverInstances: 1
podSpec:
  containers:
    - name: mysql
      resources:
        requests:
          memory: "1024Mi" # adapt to your needs
          cpu: "100m" # adapt to your needs
        limits:
          memory: "2048Mi" # adapt to your needs

@ -1,30 +0,0 @@
apiVersion: mysql.presslabs.org/v1alpha1
kind: MysqlCluster
metadata:
  name: mysql-cluster
  namespace: dbaas
spec:
  mysqlVersion: "5.7"
  replicas: 1
  secretName: cluster-secret
  mysqlConf:
    # read_only: 0 # mysql forms a single transaction for each sql statement, autocommit for each statement
    # automatic_sp_privileges: "ON" # automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine
    # auto_generate_certs: "ON" # Auto Generation of Certificate
    # auto_increment_increment: 1 # Auto Incrementing value from +1
    # auto_increment_offset: 1 # Auto Increment Offset
    # binlog-format: "STATEMENT" # contains various options such as ROW (slow, safe), STATEMENT (fast, unsafe), MIXED (combination of both)
    # wait_timeout: 31536000 # default 28800; number of seconds the server waits for activity on a non-interactive connection before closing it. If you encounter "MySQL server has gone away" errors, tweak this value accordingly
    # interactive_timeout: 28800 # The number of seconds the server waits for activity on an interactive connection before closing it.
    # max_allowed_packet: "512M" # Maximum size of MySQL network protocol packet that the server can create or read: 4MB, 8MB, 16MB, 32MB
    # max-binlog-size: 1073741824 # binary logs contain the events that describe database changes; this parameter sets the size of the bin_log file.
    # log_output: "TABLE" # Format in which the log output will be dumped
    # master-info-repository: "TABLE" # Format in which the master info will be dumped
    # relay_log_info_repository: "TABLE" # Format in which the relay info will be dumped
  volumeSpec:
    persistentVolumeClaim:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi

File diff suppressed because it is too large

@ -1,14 +0,0 @@
---
orchestrator:
  # persistence:
  #   enabled: false
  ingress:
    enable: false
    hosts:
      - host: db.viktorbarzin.me
        paths:
          - path: /
    tls:
      - secretName: ${secretName}
        hosts:
          - db.viktorbarzin.me

@ -1,30 +0,0 @@
# Use the PostGIS image as the base
FROM pgvector/pgvector:0.8.0-pg16 AS binary
FROM postgis/postgis:16-master
COPY --from=binary /pgvecto-rs-binary-release.deb /tmp/vectors.deb
RUN apt-get install -y /tmp/vectors.deb && rm -f /tmp/vectors.deb

# Install necessary packages
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
    build-essential \
    libpq-dev \
    wget \
    git \
    postgresql-server-dev-16 \
    postgresql-16-pgvector \
    # Clean up to reduce layer size
    && rm -rf /var/lib/apt/lists/* \
    && cd /tmp \
    && git clone --branch v0.8.0 https://github.com/pgvector/pgvector.git \
    && cd pgvector \
    && make \
    && make install \
    # Clean up unnecessary files
    && cd - \
    && apt-get purge -y --auto-remove build-essential postgresql-server-dev-16 libpq-dev wget git \
    && rm -rf /tmp/pgvector

# Copy initialization scripts
#COPY ./docker-entrypoint-initdb.d/ /docker-entrypoint-initdb.d/
CMD ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", "search_path=\"$user\", public, vectors", "-c", "logging_collector=on"]

@ -1,9 +0,0 @@
# terraform {
#   required_providers {
#     kubectl = {
#       source  = "gavinbunney/kubectl"
#       version = ">= 1.10.0"
#     }
#   }
#   required_version = ">= 0.13"
# }

@ -1,324 +0,0 @@

variable "tls_secret_name" {}
variable "tier" { type = string }
variable "headscale_config" {}
variable "headscale_acl" {}
variable "nfs_server" { type = string }
variable "homepage_token" {
  type      = string
  default   = ""
  sensitive = true
}

resource "kubernetes_namespace" "headscale" {
  metadata {
    name = "headscale"
    labels = {
      tier = var.tier
    }
  }
}

module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.headscale.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

module "nfs_data" {
  source     = "../../../../modules/kubernetes/nfs_volume"
  name       = "headscale-data"
  namespace  = kubernetes_namespace.headscale.metadata[0].name
  nfs_server = var.nfs_server
  nfs_path   = "/mnt/main/headscale"
}

resource "kubernetes_deployment" "headscale" {
  metadata {
    name      = "headscale"
    namespace = kubernetes_namespace.headscale.metadata[0].name
    labels = {
      app  = "headscale"
      tier = var.tier
      # scared to try this, but non-HTTP traffic would probably fail
      # "istio-injection" : "enabled"
    }

    annotations = {
      "reloader.stakater.com/search" = "true"
    }
  }
  spec {
    replicas = 1
    strategy {
      type = "Recreate"
    }
    selector {
      match_labels = {
        app = "headscale"
      }
    }
    template {
      metadata {
        labels = {
          app = "headscale"
        }
        annotations = {
          "diun.enable"       = "true"
          "diun.include_tags" = "^\\d+(?:\\.\\d+)?(?:\\.\\d+)?$"
        }
      }
      spec {
        container {
          image = "headscale/headscale:0.28.0"
          # image = "headscale/headscale:0.28.0-debug" # -debug is for debug images
          name    = "headscale"
          command = ["headscale", "serve"]

          resources {
            requests = {
              cpu    = "50m"
              memory = "128Mi"
            }
            limits = {
              memory = "128Mi"
            }
          }

          port {
            container_port = 8080
          }
          port {
            container_port = 9090
          }
          port {
            container_port = 41641
          }

          liveness_probe {
            http_get {
              path = "/health"
              port = 8080
            }
            initial_delay_seconds = 15
            period_seconds        = 30
            timeout_seconds       = 5
            failure_threshold     = 5
          }
          readiness_probe {
            http_get {
              path = "/health"
              port = 8080
            }
            initial_delay_seconds = 5
            period_seconds        = 30
            timeout_seconds       = 5
            failure_threshold     = 3
          }

          volume_mount {
            name       = "config-volume"
            mount_path = "/etc/headscale"
          }

          volume_mount {
            mount_path = "/mnt"
            name       = "nfs-config"
          }
        }
        volume {
          name = "config-volume"
          config_map {
            name = "headscale-config"
            items {
              key  = "config.yaml"
              path = "config.yaml"
            }
            items {
              key  = "acl.yaml"
              path = "acl.yaml"
            }
          }
        }

        volume {
          name = "nfs-config"
          persistent_volume_claim {
            claim_name = module.nfs_data.claim_name
          }
        }
        # container {
        #   image = "simcu/headscale-ui:0.1.4"
        #   name  = "headscale-ui"
        #   port {
        #     container_port = 80
        #   }
        # }
        container {
          image = "ghcr.io/gurucomputing/headscale-ui:latest"
          # image = "ghcr.io/tale/headplane:0.3.2"
          name = "headscale-ui"

          resources {
            requests = {
              cpu    = "25m"
              memory = "128Mi"
            }
            limits = {
              memory = "128Mi"
            }
          }

          port {
            container_port = 8081
            # container_port = 3000
          }
          env {
            name  = "HTTP_PORT"
            value = "8081"
          }
          # env {
          #   name  = "HTTPS_PORT"
          #   value = "8082"
          # }
          env {
            name  = "HEADSCALE_URL"
            value = "http://localhost:8080"
          }
          env {
            name  = "COOKIE_SECRET"
            value = "kekekekke"
          }
          env {
            name  = "ROOT_API_KEY"
            value = "kekekekeke"
          }
        }
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}
resource "kubernetes_service" "headscale" {
  metadata {
    name      = "headscale"
    namespace = kubernetes_namespace.headscale.metadata[0].name
    labels = {
      "app" = "headscale"
    }
    annotations = {
      "prometheus.io/scrape" = "true"
      "prometheus.io/port"   = "9090"
    }
    # annotations = {
    #   "metallb.universe.tf/allow-shared-ip" : "shared"
    # }
  }

  spec {
    # type                    = "LoadBalancer"
    # external_traffic_policy = "Cluster"
    selector = {
      app = "headscale"
    }
    port {
      name     = "headscale"
      port     = "8080"
      protocol = "TCP"
    }
    port {
      name        = "headscale-ui"
      port        = "80"
      target_port = 8081
      # target_port = 3000
      protocol = "TCP"
    }
    port {
      name     = "metrics"
      port     = "9090"
      protocol = "TCP"
    }
  }
}

module "ingress" {
  source          = "../../../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.headscale.metadata[0].name
  name            = "headscale"
  port            = 8080
  tls_secret_name = var.tls_secret_name
  extra_annotations = {
    "gethomepage.dev/enabled"      = "true"
    "gethomepage.dev/name"         = "Headscale"
    "gethomepage.dev/description"  = "VPN mesh network"
    "gethomepage.dev/icon"         = "headscale.png"
    "gethomepage.dev/group"        = "Identity & Security"
    "gethomepage.dev/pod-selector" = ""
  }
}

module "ingress-ui" {
  source          = "../../../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.headscale.metadata[0].name
  name            = "headscale-ui"
  host            = "headscale"
  service_name    = "headscale"
  port            = 8081
  ingress_path    = ["/web"]
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_service" "headscale-server" {
  metadata {
    name      = "headscale-server"
    namespace = kubernetes_namespace.headscale.metadata[0].name
    labels = {
      "app" = "headscale"
    }
    annotations = {
      "metallb.io/loadBalancerIPs" = "10.0.20.200"
      "metallb.io/allow-shared-ip" = "shared"
    }
  }

  spec {
    type                    = "LoadBalancer"
    external_traffic_policy = "Cluster"
    selector = {
      app = "headscale"
    }
    # port {
    #   name     = "headscale-tcp"
    #   port     = "41641"
    #   protocol = "TCP"
    # }
    port {
      name     = "headscale-udp"
      port     = "41641"
      protocol = "UDP"
    }
  }
}

resource "kubernetes_config_map" "headscale-config" {
  metadata {
    name      = "headscale-config"
    namespace = kubernetes_namespace.headscale.metadata[0].name

    annotations = {
      "reloader.stakater.com/match" = "true"
    }
  }

  data = {
    "config.yaml" = var.headscale_config
    "acl.yaml"    = var.headscale_acl
  }
}

@ -1,274 +0,0 @@
# Module to run some infra-specific things like updating the public ip
variable "git_user" {}
variable "git_token" {}
variable "technitium_username" {}
variable "technitium_password" {}
variable "nfs_server" { type = string }


# DISABLED WHILST USING CLOUDFLARE NS
# resource "kubernetes_cron_job_v1" "update-public-ip" {
#   metadata {
#     name      = "update-public-ip"
#     namespace = "default"
#   }
#   spec {
#     schedule                      = "*/5 * * * *"
#     successful_jobs_history_limit = 1
#     failed_jobs_history_limit     = 1
#     concurrency_policy            = "Forbid"
#     job_template {
#       metadata {
#         name = "update-public-ip"
#       }
#       spec {
#         template {
#           metadata {
#             name = "update-public-ip"
#           }
#           spec {
#             priority_class_name = "system-cluster-critical"
#             container {
#               name    = "update-public-ip"
#               image   = "viktorbarzin/infra"
#               command = ["./infra_cli"]
#               args    = ["-use-case", "update-public-ip"]

#               env {
#                 name  = "GIT_USER"
#                 value = var.git_user
#               }
#               env {
#                 name  = "GIT_TOKEN"
#                 value = var.git_token
#               }
#               env {
#                 name  = "TECHNITIUM_USERNAME"
#                 value = var.technitium_username
#               }
#               env {
#                 name  = "TECHNITIUM_PASSWORD"
#                 value = var.technitium_password
#               }
#             }
#             restart_policy = "Never"
#             # service_account_name = "descheduler-sa"
#             # volume {
#             #   name = "policy-volume"
#             #   config_map {
#             #     name = "policy-configmap"
#             #   }
#             # }
#           }
#         }
#       }
#     }
#   }
# }

module "nfs_etcd_backup" {
  source     = "../../../../modules/kubernetes/nfs_volume"
  name       = "infra-etcd-backup"
  namespace  = "default"
  nfs_server = var.nfs_server
  nfs_path   = "/mnt/main/etcd-backup"
}

# backup etcd
resource "kubernetes_cron_job_v1" "backup-etcd" {
  metadata {
    name      = "backup-etcd"
    namespace = "default"
  }
  spec {
    schedule                      = "0 0 * * *"
    successful_jobs_history_limit = 1
    failed_jobs_history_limit     = 1
    concurrency_policy            = "Forbid"
    job_template {
      metadata {
        name = "backup-etcd"
      }
      spec {
        template {
          metadata {
            name = "backup-etcd"
          }
          spec {
            node_name           = "k8s-master"
            priority_class_name = "system-cluster-critical"
            host_network        = true
            container {
              name    = "backup-etcd"
              image   = "registry.k8s.io/etcd:3.5.21-0"
              command = ["/bin/sh", "-c"]
              args    = ["ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /backup/etcd-snapshot-$(date +%Y%m%d-%H%M%S).db"]
              env {
                name  = "ETCDCTL_API"
                value = "3"
              }
              volume_mount {
                mount_path = "/backup"
                name       = "backup"
              }
              volume_mount {
                mount_path = "/etc/kubernetes/pki/etcd"
                name       = "etcd-certs"
                read_only  = true
              }
            }
            container {
              name    = "backup-purge"
              image   = "busybox:1.31.1"
              command = ["/bin/sh"]
              args    = ["-c", "find /backup -type f -mtime +30 -name '*.db' -exec rm -- '{}' \\;"]

              volume_mount {
                mount_path = "/backup"
                name       = "backup"
              }
            }

            volume {
              name = "backup"
              persistent_volume_claim {
                claim_name = module.nfs_etcd_backup.claim_name
              }
            }
            volume {
              name = "etcd-certs"
              host_path {
                path = "/etc/kubernetes/pki/etcd"
                type = "DirectoryOrCreate"
              }
            }
            restart_policy = "Never"
          }
        }
      }
    }
  }
}

# Weekly etcd defragmentation — prevents fragmentation buildup that causes slow requests
resource "kubernetes_cron_job_v1" "defrag-etcd" {
  metadata {
    name      = "defrag-etcd"
    namespace = "default"
  }
  spec {
    schedule                      = "0 3 * * 0"
    successful_jobs_history_limit = 1
    failed_jobs_history_limit     = 1
    concurrency_policy            = "Forbid"
    job_template {
      metadata {
        name = "defrag-etcd"
      }
      spec {
        template {
          metadata {
            name = "defrag-etcd"
          }
          spec {
            node_name           = "k8s-master"
            priority_class_name = "system-cluster-critical"
            host_network        = true
            container {
              name    = "defrag-etcd"
              image   = "registry.k8s.io/etcd:3.5.21-0"
              command = ["etcdctl"]
              args    = ["--endpoints=https://127.0.0.1:2379", "--cacert=/etc/kubernetes/pki/etcd/ca.crt", "--cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt", "--key=/etc/kubernetes/pki/etcd/healthcheck-client.key", "--command-timeout=60s", "defrag"]
              env {
                name  = "ETCDCTL_API"
                value = "3"
              }
              volume_mount {
                mount_path = "/etc/kubernetes/pki/etcd"
                name       = "etcd-certs"
                read_only  = true
              }
            }
            volume {
              name = "etcd-certs"
              host_path {
                path = "/etc/kubernetes/pki/etcd"
                type = "DirectoryOrCreate"
              }
            }
            restart_policy = "Never"
          }
        }
      }
    }
  }
}

# Clean up evicted/failed pods cluster-wide daily
resource "kubernetes_cron_job_v1" "cleanup-failed-pods" {
  metadata {
    name      = "cleanup-failed-pods"
    namespace = "default"
  }
  spec {
    schedule                      = "0 2 * * *"
    successful_jobs_history_limit = 1
    failed_jobs_history_limit     = 1
    concurrency_policy            = "Forbid"
    job_template {
      metadata {
        name = "cleanup-failed-pods"
      }
      spec {
        template {
          metadata {
            name = "cleanup-failed-pods"
          }
          spec {
            service_account_name = kubernetes_service_account.cleanup_sa.metadata[0].name
            container {
              name    = "cleanup"
              image   = "bitnami/kubectl:latest"
              command = ["/bin/sh", "-c", "kubectl delete pods -A --field-selector=status.phase=Failed --ignore-not-found"]
            }
            restart_policy = "Never"
          }
        }
      }
    }
  }
}

resource "kubernetes_service_account" "cleanup_sa" {
  metadata {
    name      = "failed-pod-cleanup"
    namespace = "default"
  }
}

resource "kubernetes_cluster_role" "cleanup_role" {
  metadata {
    name = "failed-pod-cleanup"
  }
  rule {
    api_groups = [""]
    resources  = ["pods"]
    verbs      = ["list", "delete"]
  }
}

resource "kubernetes_cluster_role_binding" "cleanup_binding" {
  metadata {
    name = "failed-pod-cleanup"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = kubernetes_cluster_role.cleanup_role.metadata[0].name
  }
  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.cleanup_sa.metadata[0].name
    namespace = "default"
  }
}

@ -1,148 +0,0 @@
resource "kubernetes_namespace" "iscsi_csi" {
  metadata {
    name = "iscsi-csi"
    labels = {
      tier                               = var.tier
      "resource-governance/custom-quota" = "true"
    }
  }
}

resource "helm_release" "democratic_csi" {
  namespace        = kubernetes_namespace.iscsi_csi.metadata[0].name
  create_namespace = false
  name             = "democratic-csi-iscsi"
  atomic           = true
  timeout          = 300

  repository = "https://democratic-csi.github.io/charts/"
  chart      = "democratic-csi"

  values = [yamlencode({
    csiDriver = {
      name = "org.democratic-csi.iscsi"
    }

    storageClasses = [{
      name                 = "iscsi-truenas"
      defaultClass         = false
      reclaimPolicy        = "Retain"
      volumeBindingMode    = "Immediate"
      allowVolumeExpansion = true
      parameters = {
        fsType = "ext4"
      }
      mountOptions = []
    }]

    controller = {
      replicas = 2
      driver = {
        resources = {
          requests = { cpu = "25m", memory = "192Mi" }
          limits   = { memory = "192Mi" }
        }
      }
      externalProvisioner = {
        resources = {
          requests = { cpu = "5m", memory = "64Mi" }
          limits   = { memory = "64Mi" }
        }
      }
      externalAttacher = {
        resources = {
          requests = { cpu = "5m", memory = "64Mi" }
          limits   = { memory = "64Mi" }
        }
      }
      externalResizer = {
        resources = {
          requests = { cpu = "5m", memory = "64Mi" }
          limits   = { memory = "64Mi" }
        }
      }
      externalSnapshotter = {
        resources = {
          requests = { cpu = "5m", memory = "80Mi" }
          limits   = { memory = "80Mi" }
        }
      }
    }

    # csiProxy is a top-level chart key, NOT nested under controller/node
    csiProxy = {
      resources = {
        requests = { cpu = "5m", memory = "32Mi" }
        limits   = { memory = "32Mi" }
      }
    }

    node = {
      driver = {
        resources = {
          requests = { cpu = "25m", memory = "192Mi" }
          limits   = { memory = "192Mi" }
        }
      }
      driverRegistrar = {
        resources = {
          requests = { cpu = "5m", memory = "32Mi" }
          limits   = { memory = "32Mi" }
        }
      }
      cleanup = {
        resources = {
          requests = { cpu = "5m", memory = "32Mi" }
          limits   = { memory = "32Mi" }
        }
      }

      hostPID  = true
      hostPath = "/lib/modules"
    }

    driver = {
      config = {
        driver = "freenas-iscsi"

        instance_id = "truenas-iscsi"

        httpConnection = {
          protocol = "http"
          host     = var.truenas_host
          port     = 80
          apiKey   = var.truenas_api_key
        }

        sshConnection = {
          host       = var.truenas_host
          port       = 22
          username   = "root"
          privateKey = var.truenas_ssh_private_key
        }

        zfs = {
          datasetParentName                  = "main/iscsi"
          detachedSnapshotsDatasetParentName = "main/iscsi-snaps"
        }

        iscsi = {
          targetPortal = "${var.truenas_host}:3260"
          namePrefix   = "csi-"
          nameSuffix   = ""
          targetGroups = [{
            targetGroupPortalGroup    = 1
            targetGroupInitiatorGroup = 1
            targetGroupAuthType       = "None"
          }]
          extentInsecureTpc              = true
          extentXenCompat                = false
          extentDisablePhysicalBlocksize = true
          extentBlocksize                = 512
          extentRpm                      = "SSD"
          extentAvailThreshold           = 0
        }
      }
    }
  })]
}

@ -1,10 +0,0 @@
variable "tier" { type = string }
variable "truenas_host" { type = string }
variable "truenas_api_key" {
  type      = string
  sensitive = true
}
variable "truenas_ssh_private_key" {
  type      = string
  sensitive = true
}

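For context on what this deletion removes: a workload consuming the `iscsi-truenas` StorageClass defined above would have claimed a volume roughly like this — an illustrative sketch only; the claim name, namespace, and size are placeholders, not taken from the repo:

resource "kubernetes_persistent_volume_claim" "example_iscsi" {
  metadata {
    name      = "example-data" # hypothetical claim
    namespace = "default"
  }
  spec {
    # Provisioned by democratic-csi against the TrueNAS iSCSI target
    storage_class_name = "iscsi-truenas"
    access_modes       = ["ReadWriteOnce"]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}
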
@ -1,23 +0,0 @@
node_modules

# Output
.output
.vercel
.netlify
.wrangler
/.svelte-kit
/build

# OS
.DS_Store
Thumbs.db

# Env
.env
.env.*
!.env.example
!.env.test

# Vite
vite.config.js.timestamp-*
vite.config.ts.timestamp-*

@ -1 +0,0 @@
engine-strict=true

@ -1,15 +0,0 @@
FROM node:22-alpine AS build
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

FROM node:22-alpine
WORKDIR /app
COPY --from=build /app/build ./build
COPY --from=build /app/package.json ./
COPY --from=build /app/node_modules ./node_modules
ENV PORT=3000
EXPOSE 3000
CMD ["node", "build"]

@ -1,42 +0,0 @@
# sv

Everything you need to build a Svelte project, powered by [`sv`](https://github.com/sveltejs/cli).

## Creating a project

If you're seeing this, you've probably already done this step. Congrats!

```sh
# create a new project
npx sv create my-app
```

To recreate this project with the same configuration:

```sh
# recreate this project
npx sv create --template minimal --types ts --install npm .
```

## Developing

Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:

```sh
npm run dev

# or start the server and open the app in a new browser tab
npm run dev -- --open
```

## Building

To create a production version of your app:

```sh
npm run build
```

You can preview the production build with `npm run preview`.

> To deploy your app, you may need to install an [adapter](https://svelte.dev/docs/kit/adapters) for your target environment.

stacks/platform/modules/k8s-portal/files/package-lock.json (generated, 1844 lines)
File diff suppressed because it is too large

@ -1,24 +0,0 @@
{
  "name": "files",
  "private": true,
  "version": "0.0.1",
  "type": "module",
  "scripts": {
    "dev": "vite dev",
    "build": "vite build",
    "preview": "vite preview",
    "prepare": "svelte-kit sync || echo ''",
    "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
    "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
  },
  "devDependencies": {
    "@sveltejs/adapter-auto": "^7.0.0",
    "@sveltejs/adapter-node": "^5.5.3",
    "@sveltejs/kit": "^2.50.2",
    "@sveltejs/vite-plugin-svelte": "^6.2.4",
    "svelte": "^5.49.2",
    "svelte-check": "^4.3.6",
    "typescript": "^5.9.3",
    "vite": "^7.3.1"
  }
}

@ -1,13 +0,0 @@
// See https://svelte.dev/docs/kit/types#app.d.ts
// for information about these interfaces
declare global {
  namespace App {
    // interface Error {}
    // interface Locals {}
    // interface PageData {}
    // interface PageState {}
    // interface Platform {}
  }
}

export {};

@ -1,11 +0,0 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    %sveltekit.head%
  </head>
  <body data-sveltekit-preload-data="hover">
    <div style="display: contents">%sveltekit.body%</div>
  </body>
</html>

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="107" height="128" viewBox="0 0 107 128"><title>svelte-logo</title><path d="M94.157 22.819c-10.4-14.885-30.94-19.297-45.792-9.835L22.282 29.608A29.92 29.92 0 0 0 8.764 49.65a31.5 31.5 0 0 0 3.108 20.231 30 30 0 0 0-4.477 11.183 31.9 31.9 0 0 0 5.448 24.116c10.402 14.887 30.942 19.297 45.791 9.835l26.083-16.624A29.92 29.92 0 0 0 98.235 78.35a31.53 31.53 0 0 0-3.105-20.232 30 30 0 0 0 4.474-11.182 31.88 31.88 0 0 0-5.447-24.116" style="fill:#ff3e00"/><path d="M45.817 106.582a20.72 20.72 0 0 1-22.237-8.243 19.17 19.17 0 0 1-3.277-14.503 18 18 0 0 1 .624-2.435l.49-1.498 1.337.981a33.6 33.6 0 0 0 10.203 5.098l.97.294-.09.968a5.85 5.85 0 0 0 1.052 3.878 6.24 6.24 0 0 0 6.695 2.485 5.8 5.8 0 0 0 1.603-.704L69.27 76.28a5.43 5.43 0 0 0 2.45-3.631 5.8 5.8 0 0 0-.987-4.371 6.24 6.24 0 0 0-6.698-2.487 5.7 5.7 0 0 0-1.6.704l-9.953 6.345a19 19 0 0 1-5.296 2.326 20.72 20.72 0 0 1-22.237-8.243 19.17 19.17 0 0 1-3.277-14.502 17.99 17.99 0 0 1 8.13-12.052l26.081-16.623a19 19 0 0 1 5.3-2.329 20.72 20.72 0 0 1 22.237 8.243 19.17 19.17 0 0 1 3.277 14.503 18 18 0 0 1-.624 2.435l-.49 1.498-1.337-.98a33.6 33.6 0 0 0-10.203-5.1l-.97-.294.09-.968a5.86 5.86 0 0 0-1.052-3.878 6.24 6.24 0 0 0-6.696-2.485 5.8 5.8 0 0 0-1.602.704L37.73 51.72a5.42 5.42 0 0 0-2.449 3.63 5.79 5.79 0 0 0 .986 4.372 6.24 6.24 0 0 0 6.698 2.486 5.8 5.8 0 0 0 1.602-.704l9.952-6.342a19 19 0 0 1 5.295-2.328 20.72 20.72 0 0 1 22.237 8.242 19.17 19.17 0 0 1 3.277 14.503 18 18 0 0 1-8.13 12.053l-26.081 16.622a19 19 0 0 1-5.3 2.328" style="fill:#fff"/></svg>
@@ -1 +0,0 @@
// place files you want to import through the `$lib` alias in this folder.
@@ -1,64 +0,0 @@
<script lang="ts">
  import favicon from '$lib/assets/favicon.svg';
  import { page } from '$app/stores';

  let { children } = $props();
</script>

<svelte:head>
  <link rel="icon" href={favicon} />
</svelte:head>

<nav>
  <div class="nav-inner">
    <a href="/" class="brand">K8s Portal</a>
    <div class="links">
      <a href="/onboarding" class:active={$page.url.pathname === '/onboarding'}>Getting Started</a>
      <a href="/architecture" class:active={$page.url.pathname === '/architecture'}>Architecture</a>
      <a href="/services" class:active={$page.url.pathname === '/services'}>Services</a>
      <a href="/contributing" class:active={$page.url.pathname === '/contributing'}>Contributing</a>
      <a href="/troubleshooting" class:active={$page.url.pathname === '/troubleshooting'}>Help</a>
    </div>
  </div>
</nav>

{@render children()}

<style>
  nav {
    background: #1a1a2e;
    padding: 0.75rem 1rem;
    position: sticky;
    top: 0;
    z-index: 100;
  }
  .nav-inner {
    max-width: 768px;
    margin: 0 auto;
    display: flex;
    align-items: center;
    gap: 1.5rem;
    flex-wrap: wrap;
  }
  .brand {
    color: #e0e0e0;
    text-decoration: none;
    font-weight: 700;
    font-size: 1.1rem;
  }
  .links {
    display: flex;
    gap: 1rem;
    flex-wrap: wrap;
  }
  .links a {
    color: #a0a0c0;
    text-decoration: none;
    font-size: 0.9rem;
    padding: 0.25rem 0;
  }
  .links a:hover, .links a.active {
    color: #ffffff;
    border-bottom: 2px solid #4fc3f7;
  }
</style>
@@ -1,33 +0,0 @@
import type { PageServerLoad } from './$types';
import { readFileSync } from 'fs';

interface UserRole {
  role: string;
  namespaces: string[];
}

export const load: PageServerLoad = async ({ request }) => {
  const email = request.headers.get('x-authentik-email') || 'unknown';
  const username = request.headers.get('x-authentik-username') || 'unknown';
  const groups = request.headers.get('x-authentik-groups') || '';

  // Read user roles from ConfigMap-mounted file
  let userRole: UserRole = { role: 'unknown', namespaces: [] };
  try {
    const usersJson = readFileSync('/config/users.json', 'utf-8');
    const users = JSON.parse(usersJson);
    if (users[email]) {
      userRole = users[email];
    }
  } catch {
    // ConfigMap not mounted or parse error
  }

  return {
    email,
    username,
    groups: groups.split('|').filter(Boolean),
    role: userRole.role,
    namespaces: userRole.namespaces
  };
};
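The loader above expects `/config/users.json` keyed by Authentik email, matching the `UserRole` shape. A hypothetical entry, plus a dry-run of the `k8s-user-roles` ConfigMap that mounts it (names taken from the module's main.tf below; the email and namespace are made up, and real cluster changes go through Terraform, hence `--dry-run` only):

```sh
# sketch of the users.json shape the loader parses
cat > users.json <<'EOF'
{
  "alice@example.com": { "role": "namespace-owner", "namespaces": ["alice-apps"] }
}
EOF
kubectl create configmap k8s-user-roles --from-file=users.json \
  -n k8s-portal --dry-run=client -o yaml
```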
@@ -1,102 +0,0 @@
<script lang="ts">
  let { data } = $props();
</script>

<main>
  <h1>Kubernetes Access Portal</h1>

  <div class="callout warning">
    <strong>VPN Required</strong> — The cluster is on a private network. You need Headscale VPN access before kubectl will work.
    <a href="/onboarding">See the Getting Started guide</a> for VPN setup instructions.
  </div>

  <section>
    <h2>Your Identity</h2>
    <p><strong>Username:</strong> {data.username}</p>
    <p><strong>Email:</strong> {data.email}</p>
    <p><strong>Role:</strong> {data.role}</p>
    {#if data.namespaces.length > 0}
      <p><strong>Namespaces:</strong> {data.namespaces.join(', ')}</p>
    {/if}
  </section>

  {#if data.role === 'namespace-owner'}
    <section>
      <h2>Your Namespace</h2>
      <p><strong>Assigned namespaces:</strong> {data.namespaces.join(', ')}</p>

      <h3>Quick Commands</h3>
      <pre>
# Check your pods
kubectl get pods -n {data.namespaces[0]}

# View quota usage
kubectl describe resourcequota -n {data.namespaces[0]}

# Log into Vault
vault login -method=oidc

# Store a secret
vault kv put secret/{data.username}/myapp KEY=value

# Get K8s deploy token
vault write kubernetes/creds/{data.namespaces[0]}-deployer \
  kubernetes_namespace={data.namespaces[0]}</pre>
    </section>
  {/if}

  <section>
    <h2>Get Started</h2>
    <ol>
      {#if data.role === 'namespace-owner'}
        <li><a href="/onboarding?role=namespace-owner">Complete the namespace-owner onboarding guide</a></li>
      {:else}
        <li><a href="/onboarding">Complete the onboarding guide</a> (VPN, kubectl, git)</li>
      {/if}
      <li><a href="/setup">Install kubectl and kubelogin</a></li>
      <li><a href="/download">Download your kubeconfig</a></li>
      <li>Run <code>kubectl get namespaces</code> to verify access</li>
    </ol>
  </section>

  <section>
    <h2>Resources</h2>
    <ul>
      <li><a href="/architecture">Architecture overview</a></li>
      <li><a href="/services">Service catalog</a></li>
      <li><a href="/contributing">How to contribute</a></li>
      <li><a href="/troubleshooting">Troubleshooting</a></li>
    </ul>
  </section>
</main>

<style>
  main {
    max-width: 768px;
    margin: 2rem auto;
    padding: 0 1rem;
    font-family: system-ui, -apple-system, sans-serif;
    line-height: 1.6;
  }
  code {
    background: #f0f0f0;
    padding: 2px 6px;
    border-radius: 3px;
  }
  section {
    margin: 2rem 0;
  }
  .callout {
    padding: 1rem;
    border-radius: 6px;
    margin: 1rem 0;
  }
  .callout.warning {
    background: #fff3cd;
    border-left: 4px solid #ffc107;
  }
  .callout a {
    color: #856404;
    font-weight: 600;
  }
</style>
@@ -1,61 +0,0 @@
<main class="content">
  <h1>Agent Bootstrap</h1>
  <p>Point any AI coding agent at this cluster and it can bootstrap itself automatically.</p>

  <section>
    <h2>For AI Agents</h2>
    <p>Fetch the machine-readable bootstrap document:</p>
    <pre>curl -fsSL https://k8s-portal.viktorbarzin.me/agent</pre>
    <p>This returns a plain-text markdown document with everything an agent needs: setup commands, critical rules, secrets workflow, Terraform conventions, key file paths, and common operations.</p>
  </section>

  <section>
    <h2>Usage with Claude Code</h2>
    <pre>claude "$(curl -fsSL https://k8s-portal.viktorbarzin.me/agent)" "Deploy a new echo service"</pre>
    <p>Or within a session:</p>
    <ol>
      <li>Clone the repo: <code>git clone https://github.com/ViktorBarzin/infra.git && cd infra</code></li>
      <li>Start Claude Code: <code>claude</code></li>
      <li>Claude auto-reads <code>AGENTS.md</code> and <code>.claude/CLAUDE.md</code> from the repo</li>
    </ol>
  </section>

  <section>
    <h2>Usage with Codex / Other Agents</h2>
    <ol>
      <li>Clone the repo and <code>cd</code> into it</li>
      <li>Run the setup script: <code>bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)</code></li>
      <li>Start the agent — it will read <code>AGENTS.md</code> for instructions</li>
    </ol>
    <p>If the agent doesn't auto-read <code>AGENTS.md</code>, feed it the bootstrap doc:</p>
    <pre>curl -fsSL https://k8s-portal.viktorbarzin.me/agent</pre>
  </section>

  <section>
    <h2>What the Agent Gets</h2>
    <ul>
      <li>Quick-start commands (setup script, repo clone)</li>
      <li>Critical rules (no kubectl apply, no plaintext secrets, no NFS restart)</li>
      <li>Sealed Secrets workflow (kubeseal self-service)</li>
      <li>Terraform conventions (fileset pattern, tiers, storage, shared vars)</li>
      <li>Key file paths in the repo</li>
      <li>Common operations (deploy, fix pods, add secrets)</li>
      <li>Contributing workflow (branch, PR, review, CI)</li>
    </ul>
  </section>

  <section>
    <h2>The <code>/agent</code> Endpoint</h2>
    <p>The endpoint is <strong>unauthenticated</strong> — no login required. Agents can <code>curl</code> or <code>WebFetch</code> it directly without a browser session, just like the setup script.</p>
    <p>Content-Type: <code>text/plain</code> — no HTML parsing needed.</p>
  </section>
</main>

<style>
  .content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
  .content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
  .content h2 { margin-top: 2rem; color: #333; }
  .content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
  .content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
  section { margin: 2rem 0; }
</style>
@@ -1,161 +0,0 @@
import type { RequestHandler } from './$types';

const BOOTSTRAP_DOC = `# Infrastructure Cluster — AI Agent Bootstrap

> Fetch this document: \`curl -fsSL https://k8s-portal.viktorbarzin.me/agent\`

## Quick Start

\`\`\`bash
# 1. Install tools (kubectl, kubelogin, kubeseal)
bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)

# 2. Clone the infrastructure repo
git clone https://github.com/ViktorBarzin/infra.git && cd infra

# 3. Verify cluster access (opens browser for OIDC login on first run)
kubectl get namespaces
\`\`\`

## Critical Rules (MUST FOLLOW)

- **ALL changes through Terraform/Terragrunt** — NEVER \`kubectl apply/edit/patch/delete\` for persistent changes. Read-only kubectl is fine.
- **NEVER put secrets in plaintext** — use Sealed Secrets (\`kubeseal\`) or \`secrets.sops.json\` (SOPS-encrypted).
- **NEVER restart NFS on TrueNAS** — causes cluster-wide mount failures across all pods.
- **NEVER commit secrets** — triple-check before every commit.
- **\`[ci skip]\` in commit messages** when changes were already applied locally.
- **Ask before \`git push\`** — always confirm with the user first.

## Sealed Secrets (Self-Service)

You can manage your own secrets without SOPS access using \`kubeseal\`:

\`\`\`bash
# 1. Create a sealed secret
kubectl create secret generic <name> \\
  --from-literal=key=value -n <namespace> \\
  --dry-run=client -o yaml | \\
  kubeseal --controller-name sealed-secrets \\
  --controller-namespace sealed-secrets -o yaml > sealed-<name>.yaml

# 2. Place the file in the stack directory: stacks/<service>/sealed-<name>.yaml

# 3. Ensure the stack's main.tf has the fileset block (add if missing):
\`\`\`

\`\`\`hcl
resource "kubernetes_manifest" "sealed_secrets" {
  for_each = fileset(path.module, "sealed-*.yaml")
  manifest = yamldecode(file("\${path.module}/\${each.value}"))
}
\`\`\`

\`\`\`bash
# 4. Push to PR — CI runs terragrunt apply — controller decrypts into real K8s Secrets
\`\`\`

- Files MUST match the \`sealed-*.yaml\` glob pattern.
- Only the in-cluster controller has the private key. \`kubeseal\` uses the public key — safe to distribute.
- The \`kubernetes_manifest\` block is safe to add even with zero sealed-*.yaml files (empty for_each).

## SOPS Secrets (Admin-Only Fallback)

For secrets requiring admin access (shared infra passwords, API keys):
- **\`secrets.sops.json\`** — SOPS-encrypted secrets (JSON format)
- **Edit**: \`sops secrets.sops.json\` (opens $EDITOR, re-encrypts on save)
- **Add**: \`sops set secrets.sops.json '["new_key"]' '"value"'\`
- **Operators without SOPS keys**: comment on your PR asking Viktor to add the secret.

## Terraform Conventions

### Execution
- **Apply a service**: \`scripts/tg apply --non-interactive\` (auto-decrypts SOPS secrets)
- **Plan**: \`scripts/tg plan --non-interactive\`
- **kubectl**: \`kubectl --kubeconfig $(pwd)/config\`
- **Health check**: \`bash scripts/cluster_healthcheck.sh --quiet\`

### Key Paths
| Path | Purpose |
|------|---------|
| \`stacks/<service>/main.tf\` | Service definition |
| \`stacks/platform/modules/<module>/\` | Core infra modules (~22) |
| \`modules/kubernetes/ingress_factory/\` | Standardized ingress (auth, rate limiting, anti-AI) |
| \`modules/kubernetes/nfs_volume/\` | NFS volume module (CSI-backed, soft mount) |
| \`config.tfvars\` | Non-secret configuration (plaintext) |
| \`secrets.sops.json\` | All secrets (SOPS-encrypted JSON) |
| \`scripts/cluster_healthcheck.sh\` | 25-check cluster health script |
| \`AGENTS.md\` | Full AI agent instructions (auto-loaded by most agents) |

### Tier System
\`0-core\` | \`1-cluster\` | \`2-gpu\` | \`3-edge\` | \`4-aux\`

Kyverno auto-generates LimitRange + ResourceQuota per namespace based on tier label.
- Containers without explicit \`resources {}\` get default limits (256Mi for edge/aux — causes OOMKill for heavy apps)
- Always set explicit resources on containers that need more than defaults
- Opt-out labels: \`resource-governance/custom-quota=true\` / \`resource-governance/custom-limitrange=true\`

### Storage
- **NFS** (\`nfs-truenas\` StorageClass): For app data. Use the \`nfs_volume\` module.
- **iSCSI** (\`iscsi-truenas\` StorageClass): For databases (PostgreSQL, MySQL).

### Shared Variables (never hardcode)
\`var.nfs_server\`, \`var.redis_host\`, \`var.postgresql_host\`, \`var.mysql_host\`, \`var.ollama_host\`, \`var.mail_host\`

## Architecture

- Terragrunt-based homelab managing a Kubernetes cluster (5 nodes, v1.34.2) on Proxmox VMs
- 70+ services, each in \`stacks/<service>/\` with its own Terraform state
- Core platform: \`stacks/platform/modules/\` (Traefik, Kyverno, monitoring, dbaas, sealed-secrets, etc.)
- Public domain: \`viktorbarzin.me\` (Cloudflare) | Internal: \`viktorbarzin.lan\` (Technitium DNS)
- CI/CD: Woodpecker CI — PRs run plan, merges to master auto-apply platform stack

## Common Operations

### Deploy a New Service
1. Copy an existing stack as template: \`cp -r stacks/echo stacks/my-service\`
2. Edit \`main.tf\` — update image, ports, ingress, resources
3. Add DNS in \`config.tfvars\`
4. Apply platform first if needed, then the service

### Fix Crashed Pods
1. Run \`bash scripts/cluster_healthcheck.sh --quiet\`
2. Safe to delete evicted/failed pods and CrashLoopBackOff pods with >10 restarts
3. OOMKilled? Check \`kubectl describe limitrange tier-defaults -n <ns>\` and increase \`resources.limits.memory\`

### Add a Secret
- **Self-service**: Use \`kubeseal\` (see Sealed Secrets section above)
- **Admin**: \`sops set secrets.sops.json '["key"]' '"value"'\` then commit

## Contributing Workflow

1. Create a branch: \`git checkout -b fix/my-change\`
2. Make changes in \`stacks/<service>/main.tf\`
3. Push and open a PR: \`git push -u origin fix/my-change\`
4. Viktor reviews and merges
5. CI applies automatically — Slack notification when done

## Infrastructure Details

- **Proxmox**: 192.168.1.127 (Dell R730, 22c/44t, 142GB RAM)
- **Nodes**: k8s-master (10.0.20.100), node1 (GPU, Tesla T4), node2-4
- **GPU workloads**: \`node_selector = { "gpu": "true" }\` + toleration \`nvidia.com/gpu\`
- **Pull-through cache**: 10.0.20.10 — use versioned image tags (cache serves stale :latest manifests)
- **MySQL InnoDB Cluster**: 3 instances on iSCSI
- **SMTP**: \`var.mail_host\` port 587 STARTTLS

## Further Reading

- Full agent instructions: \`AGENTS.md\` in the repo root
- Patterns and examples: \`.claude/reference/patterns.md\`
- Service catalog: \`.claude/reference/service-catalog.md\`
- Onboarding guide: https://k8s-portal.viktorbarzin.me/onboarding
`;

export const GET: RequestHandler = async () => {
  return new Response(BOOTSTRAP_DOC, {
    headers: {
      'Content-Type': 'text/plain; charset=utf-8',
      'Cache-Control': 'public, max-age=3600'
    }
  });
};
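The bootstrap document's kubeseal workflow ends when the in-cluster controller decrypts the SealedSecret. A read-only check (allowed under its own rules) that the round trip worked; `<name>` and `<namespace>` are placeholders:

```sh
# the SealedSecret and the Secret it was decrypted into should both exist
kubectl get sealedsecret <name> -n <namespace>
kubectl get secret <name> -n <namespace>
```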
@@ -1,75 +0,0 @@
<main class="content">
  <h1>Architecture</h1>

  <section>
    <h2>Overview</h2>
    <p>The infrastructure runs on a single Dell R730 server (22 CPU cores, 142GB RAM) using Proxmox to manage virtual machines. Five of those VMs form a Kubernetes cluster that runs 70+ services.</p>
    <pre class="output">
Proxmox (Dell R730)
├── k8s-master (10.0.20.100) — control plane
├── k8s-node1 (10.0.20.101) — GPU node (Tesla T4)
├── k8s-node2 (10.0.20.102) — worker
├── k8s-node3 (10.0.20.103) — worker
├── k8s-node4 (10.0.20.104) — worker
├── TrueNAS (10.0.10.15) — storage (NFS + iSCSI)
└── pfSense (10.0.20.1) — firewall + gateway</pre>
  </section>

  <section>
    <h2>Networking</h2>
    <ul>
      <li><strong>Public domain</strong>: <code>viktorbarzin.me</code> — managed by Cloudflare</li>
      <li><strong>Internal domain</strong>: <code>viktorbarzin.lan</code> — managed by Technitium DNS</li>
      <li><strong>Ingress</strong>: Cloudflare → Traefik → services</li>
      <li><strong>VPN</strong>: Headscale (self-hosted Tailscale)</li>
    </ul>
  </section>

  <section>
    <h2>Storage</h2>
    <ul>
      <li><strong>NFS</strong> (<code>nfs-truenas</code>) — for app data (files, configs, media). Stored on TrueNAS.</li>
      <li><strong>iSCSI</strong> (<code>iscsi-truenas</code>) — for databases (PostgreSQL, MySQL). Block storage.</li>
    </ul>
  </section>

  <section>
    <h2>Service Tiers</h2>
    <p>Services are organized into tiers that control resource limits and restart priority:</p>
    <table>
      <thead><tr><th>Tier</th><th>Examples</th><th>Priority</th></tr></thead>
      <tbody>
        <tr><td><strong>0-core</strong></td><td>Traefik, DNS, VPN, Auth</td><td>Highest — never evicted</td></tr>
        <tr><td><strong>1-cluster</strong></td><td>Redis, Prometheus, CrowdSec</td><td>High</td></tr>
        <tr><td><strong>2-gpu</strong></td><td>Ollama, Immich ML, Whisper</td><td>Medium</td></tr>
        <tr><td><strong>3-edge</strong></td><td>Nextcloud, Paperless, Grafana</td><td>Normal</td></tr>
        <tr><td><strong>4-aux</strong></td><td>Dashy, PrivateBin, CyberChef</td><td>Low — evicted first under pressure</td></tr>
      </tbody>
    </table>
  </section>

  <section>
    <h2>Infrastructure as Code</h2>
    <p>Everything is managed with <strong>Terraform</strong> (via <strong>Terragrunt</strong>). Each service has its own stack:</p>
    <pre class="output">stacks/
├── platform/    ← core infra (22 modules)
├── url/         ← URL shortener (Shlink)
├── immich/      ← photo library
├── nextcloud/   ← file storage
└── ... (70+ more)</pre>
    <p>Changes go through git: branch → PR → review → merge → CI applies automatically.</p>
  </section>
</main>

<style>
  .content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
  .content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
  .content h2 { margin-top: 2rem; color: #333; }
  .content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
  .content pre.output { background: #f5f5f5; color: #333; }
  .content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
  section { margin: 2rem 0; }
  table { border-collapse: collapse; width: 100%; }
  th, td { border: 1px solid #ddd; padding: 0.5rem; text-align: left; }
  th { background: #f5f5f5; }
</style>
@@ -1,115 +0,0 @@
<main class="content">
  <h1>How to Contribute</h1>

  <section>
    <h2>Workflow</h2>
    <ol>
      <li><strong>Create a branch</strong>: <code>git checkout -b fix/my-change</code></li>
      <li><strong>Make your changes</strong> in <code>stacks/&lt;service&gt;/main.tf</code></li>
      <li><strong>Push and open a PR</strong>: <code>git push -u origin fix/my-change</code></li>
      <li><strong>Viktor reviews</strong> and merges</li>
      <li><strong>CI applies</strong> automatically — Slack notification when done</li>
    </ol>
  </section>

  <section>
    <h2>What you CAN change</h2>
    <ul>
      <li>Service configurations (image tags, environment variables, resource limits)</li>
      <li>New services (add a new stack under <code>stacks/</code>)</li>
      <li>Ingress routes, health probes, replica counts</li>
    </ul>
  </section>

  <section>
    <h2>What needs Viktor's review</h2>
    <ul>
      <li>CI pipeline changes (<code>.woodpecker/</code>)</li>
      <li>Terragrunt configuration (<code>terragrunt.hcl</code>)</li>
      <li>Secrets configuration (<code>.sops.yaml</code>)</li>
      <li>Core platform modules (<code>stacks/platform/</code>)</li>
    </ul>
  </section>

  <section>
    <h2 class="danger-header">NEVER do these</h2>
    <div class="callout danger">
      <ul>
        <li><strong>Never <code>kubectl apply/edit/patch</code></strong> — all changes go through Terraform</li>
        <li><strong>Never put secrets in code</strong> — ask Viktor to add them to the encrypted secrets file</li>
        <li><strong>Never restart NFS on TrueNAS</strong> — causes cluster-wide mount failures</li>
        <li><strong>Never push directly to master</strong> — always use a PR</li>
      </ul>
    </div>
  </section>

  <section>
    <h2>Need a new secret?</h2>
    <p>Comment on your PR: "I need a database password for my-service." Viktor will add it to the encrypted secrets file and push to your branch.</p>
    <p>Then reference it in your Terraform: <code>var.my_service_db_password</code></p>
  </section>

  <section>
    <h2>Namespace Owner Workflow</h2>
    <p>If you are a namespace owner, you can deploy your own apps:</p>
    <ol>
      <li>Clone the infra repo: <code>git clone https://github.com/ViktorBarzin/infra.git</code></li>
      <li>Copy the template: <code>cp -r stacks/_template stacks/your-app</code></li>
      <li>Rename: <code>mv stacks/your-app/main.tf.example stacks/your-app/main.tf</code></li>
      <li>Edit <code>main.tf</code> — replace all <code>&lt;placeholders&gt;</code></li>
      <li>Store secrets in Vault: <code>vault kv put secret/your-username/your-app KEY=value</code></li>
      <li>Add your app domain to your <code>domains</code> list in Vault KV</li>
      <li>Submit a PR, get it reviewed</li>
      <li>After merge, admin runs <code>terragrunt apply</code></li>
    </ol>
  </section>

  <section>
    <h2>CI Pipeline Template</h2>
    <p>Create a <code>.woodpecker.yml</code> in your app's Forgejo repo:</p>
    <pre>{`steps:
  - name: build
    image: woodpeckerci/plugin-docker-buildx
    settings:
      repo: your-dockerhub-user/myapp
      tag: ["\${CI_PIPELINE_NUMBER}", "latest"]
      username:
        from_secret: dockerhub-username
      password:
        from_secret: dockerhub-token
      platforms: linux/amd64

  - name: deploy
    image: hashicorp/vault:1.18.1
    commands:
      - export VAULT_ADDR=http://vault-active.vault.svc.cluster.local:8200
      - export VAULT_TOKEN=$(vault write -field=token auth/kubernetes/login
        role=ci jwt=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token))
      - KUBE_TOKEN=$(vault write -field=service_account_token
        kubernetes/creds/YOUR_NAMESPACE-deployer
        kubernetes_namespace=YOUR_NAMESPACE)
      - kubectl --server=https://kubernetes.default.svc
        --token=$KUBE_TOKEN
        --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        -n YOUR_NAMESPACE set image deployment/myapp
        myapp=your-dockerhub-user/myapp:\${CI_PIPELINE_NUMBER}`}</pre>
  </section>

  <section>
    <h2>Need a secret for your app?</h2>
    <p>As a namespace owner, you manage your own secrets in Vault:</p>
    <pre>vault kv put secret/your-username/your-app DB_PASSWORD=mysecret API_KEY=abc123</pre>
    <p>Then reference them in your Terraform using a <code>data "vault_kv_secret_v2"</code> block.</p>
  </section>
</main>

<style>
  .content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
  .content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
  .content h2 { margin-top: 2rem; color: #333; }
  .content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
  section { margin: 2rem 0; }
  .callout { padding: 1rem; border-radius: 6px; margin: 1rem 0; }
  .callout.danger { background: #f8d7da; border-left: 4px solid #dc3545; }
  .danger-header { color: #dc3545; }
</style>
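Before wiring up the `data "vault_kv_secret_v2"` block the page suggests, a quick round trip confirms the secret landed at the expected path; the path mirrors the page's own example and assumes a prior `vault login -method=oidc`:

```sh
vault kv put secret/your-username/your-app DB_PASSWORD=mysecret API_KEY=abc123
vault kv get -field=DB_PASSWORD secret/your-username/your-app   # prints: mysecret
```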
@@ -1,58 +0,0 @@
import type { RequestHandler } from './$types';
import { readFileSync } from 'fs';

const CLUSTER_SERVER = 'https://10.0.20.100:6443';
const OIDC_ISSUER = 'https://authentik.viktorbarzin.me/application/o/kubernetes/';
const OIDC_CLIENT_ID = 'kubernetes';

export const GET: RequestHandler = async ({ request }) => {
  const email = request.headers.get('x-authentik-email') || 'user';

  // Read CA cert from mounted ConfigMap
  let caCert = '';
  try {
    caCert = readFileSync('/config/ca.crt', 'utf-8');
  } catch {
    // CA cert not available
  }

  const caCertBase64 = Buffer.from(caCert).toString('base64');
  const sanitizedEmail = email.replace(/[^a-zA-Z0-9@._-]/g, '');

  const kubeconfig = `apiVersion: v1
kind: Config
clusters:
- cluster:
    server: ${CLUSTER_SERVER}
    certificate-authority-data: ${caCertBase64}
  name: home-cluster
contexts:
- context:
    cluster: home-cluster
    user: oidc-${sanitizedEmail}
  name: home-cluster
current-context: home-cluster
users:
- name: oidc-${sanitizedEmail}
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: kubectl
      args:
        - oidc-login
        - get-token
        - --oidc-issuer-url=${OIDC_ISSUER}
        - --oidc-client-id=${OIDC_CLIENT_ID}
        - --oidc-extra-scope=email
        - --oidc-extra-scope=profile
        - --oidc-extra-scope=groups
      interactiveMode: IfAvailable
`;

  return new Response(kubeconfig, {
    headers: {
      'Content-Type': 'application/yaml',
      'Content-Disposition': `attachment; filename="kubeconfig-home-cluster.yaml"`
    }
  });
};
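A sketch of consuming this endpoint from a shell. It sits behind the authenticated ingress, so the session cookie has to come from a logged-in browser; the cookie name here is an assumption:

```sh
# download the generated kubeconfig (cookie value copied from your browser session)
curl -fsSL -H 'Cookie: authentik_session=<copied-from-browser>' \
  https://k8s-portal.viktorbarzin.me/download -o ~/.kube/config-home
export KUBECONFIG=~/.kube/config-home
kubectl get namespaces   # first run opens the browser for OIDC login
```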
@@ -1,146 +0,0 @@
<script>
  import { page } from '$app/stores';
  let showNamespaceOwner = $derived($page.url.searchParams.get('role') === 'namespace-owner');
</script>

<main class="content">
  <h1>Getting Started</h1>
  <p>Welcome! Follow these steps to get access to the home Kubernetes cluster.</p>

  <div class="role-tabs">
    <a href="/onboarding" class:active={!showNamespaceOwner}>General User</a>
    <a href="/onboarding?role=namespace-owner" class:active={showNamespaceOwner}>Namespace Owner</a>
  </div>

  <section>
    <h2>Step 0 — Join the VPN</h2>
    <p>The cluster is on a private network (<code>10.0.20.0/24</code>). You need VPN access first.</p>
    <ol>
      <li>Install <a href="https://tailscale.com/download" target="_blank">Tailscale</a> for your OS</li>
      <li>Run this in your terminal:
        <pre>tailscale login --login-server https://headscale.viktorbarzin.me</pre>
      </li>
      <li>A browser window will open with a registration URL</li>
      <li>Send that URL to Viktor via email (<a href="mailto:vbarzin@gmail.com">vbarzin@gmail.com</a>) or Slack</li>
      <li>Wait for approval (usually within a few hours)</li>
      <li>Once approved, test: <pre>ping 10.0.20.100</pre></li>
    </ol>
  </section>

  <section>
    <h2>Step 1 — Log in to the portal</h2>
    <p>Visit <a href="https://k8s-portal.viktorbarzin.me">k8s-portal.viktorbarzin.me</a> and sign in with your Authentik account.</p>
    <p>If you don't have an account yet, ask Viktor to create one.</p>
  </section>

  <section>
    <h2>Step 2 — Set up kubectl</h2>
    <p>Run one of these commands in your terminal to install everything automatically:</p>
    <h3>macOS</h3>
    <p class="prereq">Requires <a href="https://brew.sh" target="_blank">Homebrew</a>. Install it first if you don't have it.</p>
    <pre>bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=mac)</pre>
    <h3>Linux</h3>
    <pre>bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)</pre>
    <h3>Windows</h3>
    <p>Use <a href="https://learn.microsoft.com/en-us/windows/wsl/install" target="_blank">WSL2</a> and follow the Linux instructions.</p>
  </section>

  {#if showNamespaceOwner}
    <section>
      <h2>Step 3 — Log into Vault</h2>
      <p>Vault manages your secrets and issues dynamic Kubernetes credentials.</p>
      <pre>vault login -method=oidc</pre>
      <p>This opens your browser for Authentik SSO. After login, your token is saved to <code>~/.vault-token</code>.</p>
    </section>

    <section>
      <h2>Step 4 — Verify kubectl access</h2>
      <p>Run this command. It will open your browser for OIDC login the first time:</p>
      <pre>kubectl get pods -n YOUR_NAMESPACE</pre>
      <p>You should see an empty list (no resources) or your running pods.</p>
    </section>

    <section>
      <h2>Step 5 — Clone the infra repo</h2>
      <pre>git clone https://github.com/ViktorBarzin/infra.git
cd infra</pre>
      <p>This is where all the infrastructure configuration lives.</p>
    </section>

    <section>
      <h2>Step 6 — Create your first app stack</h2>
      <ol>
        <li>Copy the template: <pre>cp -r stacks/_template stacks/myapp
mv stacks/myapp/main.tf.example stacks/myapp/main.tf</pre></li>
        <li>Edit <code>stacks/myapp/main.tf</code> — replace all <code>&lt;placeholders&gt;</code></li>
        <li>Store secrets in Vault:
          <pre>vault kv put secret/YOUR_USERNAME/myapp DB_PASSWORD=secret123</pre>
        </li>
        <li>Add your app domain to <code>domains</code> list in Vault KV <code>k8s_users</code></li>
        <li>Submit a PR:
          <pre>git checkout -b feat/myapp
git add stacks/myapp/
git commit -m "add myapp stack"
git push -u origin feat/myapp</pre>
        </li>
        <li>Viktor reviews and merges</li>
        <li>After merge: <code>cd stacks/myapp && terragrunt apply</code></li>
      </ol>
    </section>
  {:else}
    <section>
      <h2>Step 3 — Verify access</h2>
      <p>Run this command. It will open your browser for login the first time:</p>
      <pre>kubectl get namespaces</pre>
      <p>You should see output like:</p>
      <pre class="output">NAME          STATUS   AGE
default       Active   200d
kube-system   Active   200d
monitoring    Active   200d
...</pre>
      <p>If you get a connection error, make sure your VPN is connected (<code>tailscale status</code>).</p>
    </section>

    <section>
      <h2>Step 4 — Clone the repo</h2>
      <pre>git clone https://github.com/ViktorBarzin/infra.git
cd infra</pre>
      <p>This is where all the infrastructure configuration lives.</p>
    </section>

    <section>
      <h2>Step 5 — Install your AI assistant (optional)</h2>
      <p>Install <a href="https://github.com/openai/codex" target="_blank">Codex CLI</a> for AI-assisted cluster management:</p>
      <pre>npm install -g @openai/codex</pre>
      <p>Codex reads the <code>AGENTS.md</code> file in the repo and knows how to work with the cluster.</p>
    </section>

    <section>
      <h2>Step 6 — Your first change</h2>
      <ol>
        <li>Create a branch: <pre>git checkout -b my-first-change</pre></li>
        <li>Edit a service file (e.g., change an image tag in <code>stacks/echo/main.tf</code>)</li>
        <li>Commit and push: <pre>git add . && git commit -m "my first change" && git push -u origin my-first-change</pre></li>
        <li>Open a Pull Request on GitHub</li>
        <li>Viktor reviews and merges</li>
        <li>Woodpecker CI automatically applies the change to the cluster</li>
        <li>Slack notification confirms it worked</li>
      </ol>
    </section>
  {/if}
</main>

<style>
  .content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
  .content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
  .content h2 { margin-top: 2rem; color: #333; }
  .content h3 { color: #666; margin: 1rem 0 0.25rem; }
  .content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
  .content pre.output { background: #f5f5f5; color: #333; }
  .content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
  .content .prereq { font-size: 0.9rem; color: #666; font-style: italic; }
  section { margin: 2rem 0; }
  .role-tabs { display: flex; gap: 0; margin: 1.5rem 0; border-bottom: 2px solid #e0e0e0; }
  .role-tabs a { padding: 0.5rem 1.5rem; text-decoration: none; color: #666; border-bottom: 2px solid transparent; margin-bottom: -2px; }
  .role-tabs a.active { color: #333; border-bottom-color: #333; font-weight: 600; }
</style>
@@ -1,58 +0,0 @@
<main class="content">
  <h1>Service Catalog</h1>
  <p>70+ services running on the cluster. Here are the most commonly used:</p>

  <section>
    <h2>Core Services</h2>
    <table>
      <thead><tr><th>Service</th><th>URL</th><th>Description</th></tr></thead>
      <tbody>
        <tr><td>Grafana</td><td><a href="https://grafana.viktorbarzin.me">grafana.viktorbarzin.me</a></td><td>Monitoring dashboards</td></tr>
        <tr><td>Uptime Kuma</td><td><a href="https://uptime.viktorbarzin.me">uptime.viktorbarzin.me</a></td><td>Service uptime monitoring</td></tr>
        <tr><td>Authentik</td><td><a href="https://authentik.viktorbarzin.me">authentik.viktorbarzin.me</a></td><td>Identity provider (SSO)</td></tr>
        <tr><td>Woodpecker CI</td><td><a href="https://ci.viktorbarzin.me">ci.viktorbarzin.me</a></td><td>CI/CD pipeline</td></tr>
      </tbody>
    </table>
  </section>

  <section>
    <h2>User-Facing Services</h2>
    <table>
      <thead><tr><th>Service</th><th>URL</th><th>Description</th></tr></thead>
      <tbody>
        <tr><td>Nextcloud</td><td><a href="https://nextcloud.viktorbarzin.me">nextcloud.viktorbarzin.me</a></td><td>File storage, calendar, contacts</td></tr>
        <tr><td>Immich</td><td><a href="https://immich.viktorbarzin.me">immich.viktorbarzin.me</a></td><td>Photo library (Google Photos alternative)</td></tr>
        <tr><td>Vaultwarden</td><td><a href="https://vault.viktorbarzin.me">vault.viktorbarzin.me</a></td><td>Password manager</td></tr>
        <tr><td>Paperless-ngx</td><td><a href="https://pdf.viktorbarzin.me">pdf.viktorbarzin.me</a></td><td>Document management</td></tr>
        <tr><td>Navidrome</td><td><a href="https://music.viktorbarzin.me">music.viktorbarzin.me</a></td><td>Music streaming</td></tr>
        <tr><td>Tandoor</td><td><a href="https://recipes.viktorbarzin.me">recipes.viktorbarzin.me</a></td><td>Recipe manager</td></tr>
        <tr><td>Linkwarden</td><td><a href="https://bookmarks.viktorbarzin.me">bookmarks.viktorbarzin.me</a></td><td>Bookmark manager</td></tr>
      </tbody>
    </table>
  </section>

  <section>
    <h2>Developer Tools</h2>
    <table>
      <thead><tr><th>Service</th><th>URL</th><th>Description</th></tr></thead>
      <tbody>
        <tr><td>Forgejo</td><td><a href="https://forgejo.viktorbarzin.me">forgejo.viktorbarzin.me</a></td><td>Git server (Gitea fork)</td></tr>
        <tr><td>CyberChef</td><td><a href="https://cyberchef.viktorbarzin.me">cyberchef.viktorbarzin.me</a></td><td>Data transformation tool</td></tr>
        <tr><td>Excalidraw</td><td><a href="https://draw.viktorbarzin.me">draw.viktorbarzin.me</a></td><td>Whiteboard drawing</td></tr>
        <tr><td>PrivateBin</td><td><a href="https://paste.viktorbarzin.me">paste.viktorbarzin.me</a></td><td>Encrypted paste bin</td></tr>
        <tr><td>JSON Crack</td><td><a href="https://jsoncrack.viktorbarzin.me">jsoncrack.viktorbarzin.me</a></td><td>JSON visualizer</td></tr>
      </tbody>
    </table>
  </section>
</main>

<style>
  .content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
  .content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
  .content h2 { margin-top: 2rem; color: #333; }
  section { margin: 2rem 0; }
  table { border-collapse: collapse; width: 100%; }
  th, td { border: 1px solid #ddd; padding: 0.5rem; text-align: left; }
  th { background: #f5f5f5; }
  a { color: #1a73e8; }
</style>
@@ -1,69 +0,0 @@
<main>
  <h1>Setup Instructions</h1>

  <section>
    <h2>Quick Setup (one command)</h2>
    <p>Run this in your terminal to install everything and configure kubectl automatically:</p>
    <h3>macOS</h3>
    <pre>bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=mac)</pre>
    <h3>Linux</h3>
    <pre>bash <(curl -fsSL https://k8s-portal.viktorbarzin.me/setup/script?os=linux)</pre>
  </section>

  <section>
    <h2>Manual Setup</h2>

    <h3>1. Install kubectl</h3>
    <h4>macOS</h4>
    <pre>brew install kubectl</pre>
    <h4>Linux</h4>
    <pre>curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x kubectl && sudo mv kubectl /usr/local/bin/</pre>

    <h3>2. Install kubelogin (OIDC plugin)</h3>
    <h4>macOS</h4>
    <pre>brew install int128/kubelogin/kubelogin</pre>
    <h4>Linux</h4>
    <pre>curl -LO https://github.com/int128/kubelogin/releases/latest/download/kubelogin_linux_amd64.zip
unzip kubelogin_linux_amd64.zip && sudo mv kubelogin /usr/local/bin/kubectl-oidc_login
rm kubelogin_linux_amd64.zip</pre>

    <h3>3. Download and use your kubeconfig</h3>
    <pre>
mkdir -p ~/.kube

# Download from the portal (requires auth cookie from browser)
# Or use the download button on the portal homepage

# Set the KUBECONFIG environment variable
export KUBECONFIG=~/.kube/config-home

# Test access (opens browser for login)
kubectl get namespaces
</pre>
  </section>

  <p><a href="/">← Back to portal</a></p>
</main>

<style>
  main {
    max-width: 640px;
    margin: 2rem auto;
    font-family: system-ui;
  }
  pre {
    background: #1e1e1e;
    color: #d4d4d4;
    padding: 1rem;
    border-radius: 6px;
    overflow-x: auto;
  }
  section {
    margin: 2rem 0;
  }
  h4 {
    margin: 0.5rem 0 0.25rem;
    color: #666;
  }
</style>
@@ -1,266 +0,0 @@
import type { RequestHandler } from './$types';
import { readFileSync } from 'fs';

const CLUSTER_SERVER = 'https://10.0.20.100:6443';
const OIDC_ISSUER = 'https://authentik.viktorbarzin.me/application/o/kubernetes/';
const OIDC_CLIENT_ID = 'kubernetes';

export const GET: RequestHandler = async ({ url }) => {
  const os = url.searchParams.get('os') || 'mac';

  let caCert = '';
  try {
    caCert = readFileSync('/config/ca.crt', 'utf-8');
  } catch {
    // CA cert not available
  }
  const caCertBase64 = Buffer.from(caCert).toString('base64');

  const kubeconfigContent = `apiVersion: v1
kind: Config
clusters:
- cluster:
    server: ${CLUSTER_SERVER}
    certificate-authority-data: ${caCertBase64}
  name: home-cluster
contexts:
- context:
    cluster: home-cluster
    user: oidc-user
  name: home-cluster
current-context: home-cluster
users:
- name: oidc-user
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: kubectl
      args:
        - oidc-login
        - get-token
        - --oidc-issuer-url=${OIDC_ISSUER}
        - --oidc-client-id=${OIDC_CLIENT_ID}
        - --oidc-extra-scope=email
        - --oidc-extra-scope=profile
        - --oidc-extra-scope=groups
      interactiveMode: IfAvailable`;

  let script: string;

  if (os === 'linux') {
    script = `#!/bin/bash
set -e

echo "=== Kubernetes Cluster Setup ==="
echo ""

# Use sudo if available, otherwise install directly (e.g. in containers running as root)
SUDO=""
if [ "$(id -u)" -ne 0 ] && command -v sudo &>/dev/null; then
  SUDO="sudo"
fi

# Determine install directory
INSTALL_DIR="/usr/local/bin"
if [ ! -w "\$INSTALL_DIR" ] && [ -z "\$SUDO" ]; then
  INSTALL_DIR="\$HOME/.local/bin"
  mkdir -p "\$INSTALL_DIR"
  export PATH="\$INSTALL_DIR:\$PATH"
fi

# Install kubectl
if command -v kubectl &>/dev/null; then
  echo "[OK] kubectl already installed"
else
  echo "[..] Installing kubectl..."
  KUBECTL_VERSION=\$(curl -L -s https://dl.k8s.io/release/stable.txt)
  curl -fsSLO "https://dl.k8s.io/release/\${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
  chmod +x kubectl && \$SUDO mv kubectl "\$INSTALL_DIR/"
  echo "[OK] kubectl installed"
fi

# Install kubelogin
if command -v kubectl-oidc_login &>/dev/null; then
  echo "[OK] kubelogin already installed"
else
  echo "[..] Installing kubelogin..."
  KUBELOGIN_VERSION=\$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/int128/kubelogin/releases/latest | grep -o '[^/]*\$')
  curl -fsSLO "https://github.com/int128/kubelogin/releases/download/\${KUBELOGIN_VERSION}/kubelogin_linux_amd64.zip"
  unzip -o kubelogin_linux_amd64.zip kubelogin -d /tmp
  \$SUDO mv /tmp/kubelogin "\$INSTALL_DIR/kubectl-oidc_login"
  rm -f kubelogin_linux_amd64.zip
  echo "[OK] kubelogin installed"
fi

# Install kubeseal
if command -v kubeseal &>/dev/null; then
  echo "[OK] kubeseal already installed"
else
  echo "[..] Installing kubeseal..."
  KUBESEAL_VERSION=\$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/bitnami-labs/sealed-secrets/releases/latest | grep -o '[^/]*\$')
  curl -fsSLO "https://github.com/bitnami-labs/sealed-secrets/releases/download/\${KUBESEAL_VERSION}/kubeseal-\${KUBESEAL_VERSION#v}-linux-amd64.tar.gz"
  tar -xzf "kubeseal-\${KUBESEAL_VERSION#v}-linux-amd64.tar.gz" kubeseal
  \$SUDO mv kubeseal "\$INSTALL_DIR/"
  rm -f "kubeseal-\${KUBESEAL_VERSION#v}-linux-amd64.tar.gz"
  echo "[OK] kubeseal installed"
fi

# Install Vault CLI
if command -v vault &>/dev/null; then
  echo "[OK] vault already installed"
else
  echo "[..] Installing Vault CLI..."
  VAULT_VERSION="1.18.1"
  curl -fsSLO "https://releases.hashicorp.com/vault/\${VAULT_VERSION}/vault_\${VAULT_VERSION}_linux_amd64.zip"
  unzip -o "vault_\${VAULT_VERSION}_linux_amd64.zip" vault -d /tmp
  \$SUDO mv /tmp/vault "\$INSTALL_DIR/"
  rm -f "vault_\${VAULT_VERSION}_linux_amd64.zip"
  echo "[OK] vault installed"
fi

# Install Terragrunt
if command -v terragrunt &>/dev/null; then
  echo "[OK] terragrunt already installed"
else
  echo "[..] Installing terragrunt..."
  TG_VERSION=\$(curl -fsSL -o /dev/null -w "%{url_effective}" https://github.com/gruntwork-io/terragrunt/releases/latest | grep -o '[^/]*\$')
  curl -fsSLO "https://github.com/gruntwork-io/terragrunt/releases/download/\${TG_VERSION}/terragrunt_linux_amd64"
  chmod +x terragrunt_linux_amd64
  \$SUDO mv terragrunt_linux_amd64 "\$INSTALL_DIR/terragrunt"
  echo "[OK] terragrunt installed"
fi

# Install Terraform
if command -v terraform &>/dev/null; then
  echo "[OK] terraform already installed"
else
  echo "[..] Installing terraform..."
  TF_VERSION="1.9.8"
  curl -fsSLO "https://releases.hashicorp.com/terraform/\${TF_VERSION}/terraform_\${TF_VERSION}_linux_amd64.zip"
  unzip -o "terraform_\${TF_VERSION}_linux_amd64.zip" terraform -d /tmp
  \$SUDO mv /tmp/terraform "\$INSTALL_DIR/"
  rm -f "terraform_\${TF_VERSION}_linux_amd64.zip"
  echo "[OK] terraform installed"
fi

# Write kubeconfig
mkdir -p ~/.kube
cat > ~/.kube/config-home << 'KUBECONFIG_EOF'
${kubeconfigContent}
KUBECONFIG_EOF
echo "[OK] Kubeconfig written to ~/.kube/config-home"

# Add KUBECONFIG to shell profile
SHELL_RC=~/.bashrc
[ -f ~/.zshrc ] && SHELL_RC=~/.zshrc
if ! grep -q 'config-home' "\$SHELL_RC" 2>/dev/null; then
  echo 'export KUBECONFIG=~/.kube/config-home' >> "\$SHELL_RC"
  echo "[OK] Added KUBECONFIG to \$SHELL_RC"
fi
export KUBECONFIG=~/.kube/config-home

echo ""
echo "=== Setup complete! ==="
echo ""
echo "Run 'kubectl get namespaces' to test (opens browser for login)."
echo "You may need to restart your shell or run: export KUBECONFIG=~/.kube/config-home"
`;
  } else {
    script = `#!/bin/bash
set -e

echo "=== Kubernetes Cluster Setup ==="
echo ""

# Check for Homebrew
if ! command -v brew &>/dev/null; then
  echo "[!!] Homebrew not found. Install it first:"
  echo '  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"'
  exit 1
fi

# Install kubectl
if command -v kubectl &>/dev/null; then
  echo "[OK] kubectl already installed ($(kubectl version --client -o json 2>/dev/null | grep -o '"gitVersion":"[^"]*"' | cut -d'"' -f4))"
else
  echo "[..] Installing kubectl..."
  brew install kubectl
  echo "[OK] kubectl installed"
fi

# Install kubelogin
if command -v kubectl-oidc_login &>/dev/null; then
  echo "[OK] kubelogin already installed"
else
  echo "[..] Installing kubelogin..."
  brew install int128/kubelogin/kubelogin
  echo "[OK] kubelogin installed"
fi

# Install kubeseal
if command -v kubeseal &>/dev/null; then
  echo "[OK] kubeseal already installed"
else
  echo "[..] Installing kubeseal..."
  brew install kubeseal
  echo "[OK] kubeseal installed"
fi

# Install Vault CLI
if command -v vault &>/dev/null; then
  echo "[OK] vault already installed"
else
  echo "[..] Installing Vault CLI..."
  brew tap hashicorp/tap
  brew install hashicorp/tap/vault
  echo "[OK] vault installed"
fi

# Install Terragrunt
if command -v terragrunt &>/dev/null; then
  echo "[OK] terragrunt already installed"
else
  echo "[..] Installing terragrunt..."
  brew install terragrunt
  echo "[OK] terragrunt installed"
fi

# Install Terraform
if command -v terraform &>/dev/null; then
  echo "[OK] terraform already installed"
else
  echo "[..] Installing terraform..."
  brew install hashicorp/tap/terraform
  echo "[OK] terraform installed"
fi

# Write kubeconfig
mkdir -p ~/.kube
cat > ~/.kube/config-home << 'KUBECONFIG_EOF'
${kubeconfigContent}
KUBECONFIG_EOF
echo "[OK] Kubeconfig written to ~/.kube/config-home"

# Add KUBECONFIG to shell profile
SHELL_RC=~/.zshrc
[ ! -f ~/.zshrc ] && SHELL_RC=~/.bashrc
if ! grep -q 'config-home' "\$SHELL_RC" 2>/dev/null; then
  echo 'export KUBECONFIG=~/.kube/config-home' >> "\$SHELL_RC"
  echo "[OK] Added KUBECONFIG to \$SHELL_RC"
fi
export KUBECONFIG=~/.kube/config-home

echo ""
echo "=== Setup complete! ==="
echo ""
echo "Run 'kubectl get namespaces' to test (opens browser for login)."
echo "You may need to restart your shell or run: export KUBECONFIG=~/.kube/config-home"
`;
  }

  return new Response(script, {
    headers: {
      'Content-Type': 'text/plain; charset=utf-8'
    }
  });
};
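Both generated scripts resolve "latest" GitHub release tags the same way: let curl follow the redirect and read the final URL. The trick in isolation, using kubelogin as the example repo (the printed version is whatever happens to be current):

```sh
# /releases/latest redirects to /releases/tag/<version>;
# -w '%{url_effective}' prints the final URL, and the last path segment is the tag
curl -fsSL -o /dev/null -w '%{url_effective}' \
  https://github.com/int128/kubelogin/releases/latest | grep -o '[^/]*$'
```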
@@ -1,63 +0,0 @@
<main class="content">
  <h1>Troubleshooting</h1>

  <section>
    <h2>"kubectl can't connect to the server"</h2>
    <ol>
      <li>Check your VPN: <code>tailscale status</code> — should show "connected"</li>
      <li>Check KUBECONFIG: <code>echo $KUBECONFIG</code> — should be <code>~/.kube/config-home</code></li>
      <li>Test connectivity: <code>ping 10.0.20.100</code></li>
      <li>If ping works but kubectl doesn't, re-run the <a href="/setup">setup script</a></li>
    </ol>
  </section>

  <section>
    <h2>"Forbidden" or "Permission denied"</h2>
    <p>You may not have access to that namespace. Your access is scoped to specific namespaces.</p>
    <p>Try: <code>kubectl get namespaces</code> to see which namespaces you can access.</p>
    <p>Need access to another namespace? Ask Viktor.</p>
  </section>

  <section>
    <h2>"Pod is CrashLoopBackOff"</h2>
    <ol>
      <li>Check pod logs: <code>kubectl logs -n &lt;namespace&gt; &lt;pod-name&gt; --tail=50</code></li>
      <li>Check previous crash: <code>kubectl logs -n &lt;namespace&gt; &lt;pod-name&gt; --previous</code></li>
      <li>Check events: <code>kubectl describe pod -n &lt;namespace&gt; &lt;pod-name&gt;</code></li>
      <li>Common causes: OOMKilled (need more memory), bad config, database connection failure</li>
    </ol>
  </section>

  <section>
    <h2>"PR CI failed"</h2>
    <ol>
      <li>Check the Woodpecker CI dashboard: <a href="https://ci.viktorbarzin.me">ci.viktorbarzin.me</a></li>
      <li>Read the build logs — the error is usually at the bottom</li>
      <li>Fix the issue, commit, and push — CI will re-run</li>
    </ol>
  </section>

  <section>
    <h2>"I need a new secret / database password"</h2>
    <p>Secrets are managed by Viktor in an encrypted file. You cannot add them yourself.</p>
    <ol>
      <li>Comment on your PR: "Need DB password for &lt;service&gt;"</li>
      <li>Viktor adds the secret and pushes to your branch</li>
      <li>Reference it as <code>var.&lt;service&gt;_db_password</code> in your Terraform</li>
    </ol>
  </section>

  <section>
    <h2>Still stuck?</h2>
    <p>Email Viktor at <a href="mailto:vbarzin@gmail.com">vbarzin@gmail.com</a> or message on Slack.</p>
  </section>
</main>

<style>
  .content { max-width: 768px; margin: 2rem auto; padding: 0 1rem; font-family: system-ui, -apple-system, sans-serif; line-height: 1.6; }
  .content h1 { border-bottom: 1px solid #e0e0e0; padding-bottom: 0.5rem; }
  .content h2 { margin-top: 2rem; color: #333; }
  .content pre { background: #1e1e1e; color: #d4d4d4; padding: 1rem; border-radius: 6px; overflow-x: auto; }
  .content code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
  section { margin: 2rem 0; }
</style>
@@ -1,3 +0,0 @@
# allow crawling everything by default
User-agent: *
Disallow:
@@ -1,10 +0,0 @@
import adapter from '@sveltejs/adapter-node';

/** @type {import('@sveltejs/kit').Config} */
const config = {
  kit: {
    adapter: adapter()
  }
};

export default config;
@@ -1,20 +0,0 @@
{
  "extends": "./.svelte-kit/tsconfig.json",
  "compilerOptions": {
    "rewriteRelativeImportExtensions": true,
    "allowJs": true,
    "checkJs": true,
    "esModuleInterop": true,
    "forceConsistentCasingInFileNames": true,
    "resolveJsonModule": true,
    "skipLibCheck": true,
    "sourceMap": true,
    "strict": true,
    "moduleResolution": "bundler"
  }
  // Path aliases are handled by https://svelte.dev/docs/kit/configuration#alias
  // except $lib which is handled by https://svelte.dev/docs/kit/configuration#files
  //
  // To make changes to top-level options such as include and exclude, we recommend extending
  // the generated config; see https://svelte.dev/docs/kit/configuration#typescript
}
@@ -1,6 +0,0 @@
import { sveltekit } from '@sveltejs/kit/vite';
import { defineConfig } from 'vite';

export default defineConfig({
  plugins: [sveltekit()]
});
@@ -1,166 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "k8s_ca_cert" {
  type    = string
  default = ""
}

resource "kubernetes_namespace" "k8s_portal" {
  metadata {
    name = "k8s-portal"
    labels = {
      tier = var.tier
    }
  }
}

module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.k8s_portal.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_config_map" "k8s_portal_config" {
  metadata {
    name      = "k8s-portal-config"
    namespace = kubernetes_namespace.k8s_portal.metadata[0].name
  }

  data = {
    "ca.crt" = var.k8s_ca_cert
  }
}

resource "kubernetes_deployment" "k8s_portal" {
  metadata {
    name      = "k8s-portal"
    namespace = kubernetes_namespace.k8s_portal.metadata[0].name
    labels = {
      app  = "k8s-portal"
      tier = var.tier
    }
  }

  spec {
    replicas = 1
    strategy {
      type = "Recreate"
    }
    revision_history_limit = 3
    selector {
      match_labels = {
        app = "k8s-portal"
      }
    }

    template {
      metadata {
        labels = {
          app = "k8s-portal"
        }
      }

      spec {
        container {
          name  = "portal"
          image = "viktorbarzin/k8s-portal:latest"
          port {
            container_port = 3000
          }

          volume_mount {
            name       = "config"
            mount_path = "/config/ca.crt"
            sub_path   = "ca.crt"
            read_only  = true
          }
          volume_mount {
            name       = "user-roles"
            mount_path = "/config/users.json"
            sub_path   = "users.json"
            read_only  = true
          }
          resources {
            requests = {
              cpu    = "10m"
              memory = "128Mi"
            }
            limits = {
              memory = "128Mi"
            }
          }
        }

        volume {
          name = "config"
          config_map {
            name = kubernetes_config_map.k8s_portal_config.metadata[0].name
          }
        }
        volume {
          name = "user-roles"
          config_map {
            name = "k8s-user-roles"
          }
        }
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
  lifecycle {
    ignore_changes = [
      spec[0].template[0].spec[0].dns_config,
      spec[0].template[0].spec[0].container[0].image, # CI updates image tag
    ]
  }
}

resource "kubernetes_service" "k8s_portal" {
  metadata {
    name      = "k8s-portal"
    namespace = kubernetes_namespace.k8s_portal.metadata[0].name
  }

  spec {
    selector = {
      app = "k8s-portal"
    }
    port {
      port        = 80
      target_port = 3000
    }
  }
}

module "ingress" {
  source          = "../../../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.k8s_portal.metadata[0].name
  name            = "k8s-portal"
  tls_secret_name = var.tls_secret_name
  protected       = true # Require Authentik login
  extra_annotations = {
    "gethomepage.dev/enabled"      = "true"
    "gethomepage.dev/name"         = "K8s Portal"
    "gethomepage.dev/description"  = "Kubernetes portal"
    "gethomepage.dev/icon"         = "kubernetes.png"
    "gethomepage.dev/group"        = "Core Platform"
    "gethomepage.dev/pod-selector" = ""
  }
}

# Unprotected ingress for the setup script and agent endpoint (needs to be curl-able without auth)
|
||||
module "ingress_setup_script" {
|
||||
source = "../../../../modules/kubernetes/ingress_factory"
|
||||
namespace = kubernetes_namespace.k8s_portal.metadata[0].name
|
||||
name = "k8s-portal-setup"
|
||||
host = "k8s-portal"
|
||||
service_name = "k8s-portal"
|
||||
ingress_path = ["/setup/script", "/agent"]
|
||||
tls_secret_name = var.tls_secret_name
|
||||
protected = false
|
||||
}
|
||||
|
@@ -1,72 +0,0 @@

# =============================================================================
# Pod Dependency Init Container Injection
# =============================================================================
# Reads the annotation dependency.kyverno.io/wait-for from pods and injects
# init containers that wait for each listed dependency to be reachable.
#
# Usage:
#   annotations:
#     dependency.kyverno.io/wait-for: "postgresql.dbaas:5432,redis.redis:6379"
#
# Each comma-separated entry becomes a busybox init container that runs
# `nc -z <host> <port>` in a loop until the dependency is reachable.
# Existing init containers are preserved — Kyverno appends to the array.
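# Illustrative consumer of this policy (kept commented out; the app, namespace
# and image below are hypothetical placeholders, not part of this stack). The
# annotation makes Kyverno inject one init container per listed dependency
# before the app container starts:
#
# resource "kubernetes_deployment" "example_app" {
#   metadata {
#     name      = "example-app"
#     namespace = "example"
#   }
#   spec {
#     selector {
#       match_labels = { app = "example-app" }
#     }
#     template {
#       metadata {
#         labels = { app = "example-app" }
#         annotations = {
#           "dependency.kyverno.io/wait-for" = "postgresql.dbaas:5432,redis.redis:6379"
#         }
#       }
#       spec {
#         container {
#           name  = "app"
#           image = "docker.io/library/nginx:1.27"
#         }
#       }
#     }
#   }
# }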
resource "kubernetes_manifest" "inject_dependency_init_containers" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "inject-dependency-init-containers"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Inject Dependency Init Containers"
|
||||
"policies.kyverno.io/description" = "Injects wait-for init containers based on dependency.kyverno.io/wait-for pod annotation. Each comma-separated host:port entry becomes a busybox init container that blocks until the dependency is reachable via nc -z."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
rules = [
|
||||
{
|
||||
name = "wait-for-dependencies"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
operations = ["CREATE"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
preconditions = {
|
||||
all = [
|
||||
{
|
||||
key = "{{ request.object.metadata.annotations.\"dependency.kyverno.io/wait-for\" || '' }}"
|
||||
operator = "NotEquals"
|
||||
value = ""
|
||||
}
|
||||
]
|
||||
}
|
||||
mutate = {
|
||||
foreach = [
|
||||
{
|
||||
list = "request.object.metadata.annotations.\"dependency.kyverno.io/wait-for\" | split(@, ',')"
|
||||
patchStrategicMerge = {
|
||||
spec = {
|
||||
initContainers = [
|
||||
{
|
||||
name = "wait-for-{{ element | split(@, ':') | [0] | replace_all(@, '.', '-') }}"
|
||||
image = "busybox:1.37"
|
||||
command = ["sh", "-c", "until nc -z {{ element | split(@, ':') | [0] }} {{ element | split(@, ':') | [1] }}; do echo waiting for {{ element }}; sleep 2; done"]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,216 +0,0 @@

resource "kubernetes_namespace" "kyverno" {
  metadata {
    name = "kyverno"
    labels = {
      "istio-injection" : "disabled"
    }
  }
}

resource "helm_release" "kyverno" {
  namespace        = kubernetes_namespace.kyverno.metadata[0].name
  create_namespace = false
  name             = "kyverno"
  atomic           = true

  repository = "https://kyverno.github.io/kyverno/"
  chart      = "kyverno"
  version    = "3.6.1"

  values = [yamlencode({
    # When Kyverno is unavailable, allow pod creation to proceed without
    # mutation/validation rather than blocking all admissions cluster-wide.
    features = {
      forceFailurePolicyIgnore = {
        enabled = true
      }
      policyReports = {
        enabled = false
      }
    }

    reportsController = {
      resources = {
        limits = {
          memory = "512Mi"
        }
        requests = {
          cpu    = "100m"
          memory = "384Mi"
        }
      }
    }

    backgroundController = {
      resources = {
        limits = {
          memory = "384Mi"
        }
        requests = {
          cpu    = "100m"
          memory = "384Mi"
        }
      }
    }

    cleanupController = {
      resources = {
        limits = {
          memory = "192Mi"
        }
        requests = {
          cpu    = "100m"
          memory = "192Mi"
        }
      }
    }

    admissionController = {
      replicas = 2

      updateStrategy = {
        type = "RollingUpdate"
        rollingUpdate = {
          maxSurge       = 0
          maxUnavailable = 1
        }
      }

      container = {
        resources = {
          limits = {
            memory = "256Mi"
          }
          requests = {
            cpu    = "100m"
            memory = "256Mi"
          }
        }
      }

      # More tolerant liveness probe — API server slowness shouldn't kill the pod
      livenessProbe = {
        httpGet = {
          path   = "/health/liveness"
          port   = 9443
          scheme = "HTTPS"
        }
        initialDelaySeconds = 15
        periodSeconds       = 30
        timeoutSeconds      = 5
        failureThreshold    = 4
        successThreshold    = 1
      }

      # Spread replicas across nodes for HA
      topologySpreadConstraints = [
        {
          maxSkew           = 1
          topologyKey       = "kubernetes.io/hostname"
          whenUnsatisfiable = "DoNotSchedule"
          labelSelector = {
            matchLabels = {
              "app.kubernetes.io/component" = "admission-controller"
              "app.kubernetes.io/instance"  = "kyverno"
            }
          }
        }
      ]
    }
  })]
}

# To unlabel all:
# kubectl label deployment,statefulset,daemonset --all-namespaces -l tier tier-
#
# Uses namespaceSelector to match tiers — no API call needed.
# One rule per tier so Kyverno resolves the tier value from its informer cache.
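# Illustrative input (kept commented out; the namespace is a hypothetical
# placeholder, not part of this stack): any namespace labelled with a tier
# gets that label synced onto its Deployments/StatefulSets/DaemonSets by the
# per-tier rules below.
#
# resource "kubernetes_namespace" "example_app" {
#   metadata {
#     name = "example-app"
#     labels = {
#       tier = "4-aux" # matched by rule "sync-tier-4-aux"
#     }
#   }
# }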
resource "kubernetes_manifest" "mutate_tier_from_namespace" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "sync-tier-label-from-namespace"
|
||||
}
|
||||
spec = {
|
||||
rules = [for tier in local.governance_tiers : {
|
||||
name = "sync-tier-${tier}"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Deployment", "StatefulSet", "DaemonSet"]
|
||||
namespaceSelector = {
|
||||
matchLabels = {
|
||||
tier = tier
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
namespaces = ["kube-system", "metallb-system", "n8n"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
mutate = {
|
||||
patchStrategicMerge = {
|
||||
metadata = {
|
||||
labels = {
|
||||
"+(tier)" = tier
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# resource "kubernetes_manifest" "enforce_pod_tier_label" {
|
||||
# manifest = {
|
||||
# apiVersion = "kyverno.io/v1"
|
||||
# kind = "ClusterPolicy"
|
||||
# metadata = {
|
||||
# name = "enforce-pod-tier-label"
|
||||
# annotations = {
|
||||
# "policies.kyverno.io/description" = "Rejects any pod that does not have a tier label."
|
||||
# }
|
||||
# }
|
||||
# spec = {
|
||||
# # 'Enforce' blocks the creation. 'Audit' just reports it.
|
||||
# validationFailureAction = "Enforce"
|
||||
# background = true
|
||||
# rules = [
|
||||
# {
|
||||
# name = "check-for-tier-label"
|
||||
# match = {
|
||||
# any = [
|
||||
# {
|
||||
# resources = {
|
||||
# kinds = ["Pod"]
|
||||
# }
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
# validate = {
|
||||
# message = "The label 'tier' is required for all pods in this cluster."
|
||||
# pattern = {
|
||||
# metadata = {
|
||||
# labels = {
|
||||
# "tier" = "?*" # The "?*" syntax means the value must not be empty
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
@@ -1,950 +0,0 @@

# =============================================================================
# Tier-Based Resource Governance
# =============================================================================
# default (limit) = defaultRequest (request) to give Guaranteed QoS and prevent
# memory overcommit. Changed 2026-03-14 after node2 OOM crash caused by 250%
# memory overcommit (61GB limits on 24GB node).
#
# Four layers of protection against noisy neighbor issues:
#   1. PriorityClasses - critical services survive resource pressure
#   2. LimitRange defaults (Kyverno generate) - auto-inject defaults for containers without resources
#   3. ResourceQuotas (Kyverno generate) - hard ceiling on namespace resource consumption
#   4. Priority injection (Kyverno mutate) - set priorityClassName based on namespace tier label

locals {
  governance_tiers    = ["0-core", "1-cluster", "2-gpu", "3-edge", "4-aux"]
  excluded_namespaces = ["kube-system", "metallb-system", "kyverno", "calico-system", "calico-apiserver"]
}
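# Worked example of the defaults (hypothetical pod; the numbers come from the
# tier-4-aux LimitRange below): a container deployed to a 4-aux namespace with
# no resources block ends up with
#   requests: cpu=50m, memory=64Mi
#   limits:   memory=256Mi
# i.e. Burstable QoS, while the 0-core/1-cluster defaults set request = limit
# on memory for Guaranteed QoS.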

# -----------------------------------------------------------------------------
# Layer 1: PriorityClasses
# -----------------------------------------------------------------------------
# Values stay well below system-cluster-critical (2,000,000,000)

resource "kubernetes_priority_class" "tier_0_core" {
  metadata {
    name = "tier-0-core"
  }
  value             = 1000000
  global_default    = false
  preemption_policy = "PreemptLowerPriority"
  description       = "Critical infrastructure: ingress, DNS, VPN, auth, monitoring"
}

resource "kubernetes_priority_class" "tier_1_cluster" {
  metadata {
    name = "tier-1-cluster"
  }
  value             = 800000
  global_default    = false
  preemption_policy = "PreemptLowerPriority"
  description       = "Cluster services: Redis, metrics, security"
}

resource "kubernetes_priority_class" "tier_2_gpu" {
  metadata {
    name = "tier-2-gpu"
  }
  value             = 600000
  global_default    = false
  preemption_policy = "PreemptLowerPriority"
  description       = "GPU workloads: Immich, Ollama, Frigate"
}

resource "kubernetes_priority_class" "gpu_workload" {
  metadata {
    name = "gpu-workload"
  }
  value             = 1200000
  global_default    = false
  preemption_policy = "PreemptLowerPriority"
  description       = "GPU-pinned workloads. Higher than all user tiers. Auto-injected by Kyverno on pods requesting nvidia.com/gpu."
}

resource "kubernetes_priority_class" "tier_3_edge" {
  metadata {
    name = "tier-3-edge"
  }
  value             = 400000
  global_default    = false
  preemption_policy = "PreemptLowerPriority"
  description       = "User-facing services: mail, file sync, dashboards"
}

resource "kubernetes_priority_class" "tier_4_aux" {
  metadata {
    name = "tier-4-aux"
  }
  value             = 200000
  global_default    = false
  preemption_policy = "Never"
  description       = "Optional services: blogs, tools, experiments. Will not preempt other aux services."
}

# -----------------------------------------------------------------------------
# Layer 2: LimitRange Defaults (Kyverno Generate)
# -----------------------------------------------------------------------------
# Creates a LimitRange in each namespace based on its tier label.
# Only affects containers WITHOUT explicit resource requests/limits.

resource "kubernetes_manifest" "generate_limitrange_by_tier" {
  manifest = {
    apiVersion = "kyverno.io/v1"
    kind       = "ClusterPolicy"
    metadata = {
      name = "generate-limitrange-by-tier"
      annotations = {
        "policies.kyverno.io/title"       = "Generate LimitRange by Tier"
        "policies.kyverno.io/description" = "Creates tier-appropriate LimitRange defaults in namespaces based on their tier label. Only affects containers without explicit resource specifications. Excludes namespaces with resource-governance/custom-limitrange label."
      }
    }
    spec = {
      generateExisting = true
      rules = [
        # Tier 0-core
        {
          name = "limitrange-tier-0-core"
          match = {
            any = [{ resources = { kinds = ["Namespace"], selector = { matchLabels = { tier = "0-core" } } } }]
          }
          exclude = {
            any = [{ resources = { selector = { matchLabels = { "resource-governance/custom-limitrange" = "true" } } } }]
          }
          generate = {
            synchronize = true
            apiVersion  = "v1"
            kind        = "LimitRange"
            name        = "tier-defaults"
            namespace   = "{{request.object.metadata.name}}"
            data = {
              spec = {
                limits = [{
                  type           = "Container"
                  default        = { memory = "256Mi" }
                  defaultRequest = { cpu = "100m", memory = "256Mi" }
                  max            = { memory = "8Gi" }
                }]
              }
            }
          }
        },
        # Tier 1-cluster
        {
          name = "limitrange-tier-1-cluster"
          match = {
            any = [{ resources = { kinds = ["Namespace"], selector = { matchLabels = { tier = "1-cluster" } } } }]
          }
          exclude = {
            any = [{ resources = { selector = { matchLabels = { "resource-governance/custom-limitrange" = "true" } } } }]
          }
          generate = {
            synchronize = true
            apiVersion  = "v1"
            kind        = "LimitRange"
            name        = "tier-defaults"
            namespace   = "{{request.object.metadata.name}}"
            data = {
              spec = {
                limits = [{
                  type           = "Container"
                  default        = { memory = "256Mi" }
                  defaultRequest = { cpu = "100m", memory = "256Mi" }
                  max            = { memory = "4Gi" }
                }]
              }
            }
          }
        },
        # Tier 2-gpu
        {
          name = "limitrange-tier-2-gpu"
          match = {
            any = [{ resources = { kinds = ["Namespace"], selector = { matchLabels = { tier = "2-gpu" } } } }]
          }
          exclude = {
            any = [{ resources = { selector = { matchLabels = { "resource-governance/custom-limitrange" = "true" } } } }]
          }
          generate = {
            synchronize = true
            apiVersion  = "v1"
            kind        = "LimitRange"
            name        = "tier-defaults"
            namespace   = "{{request.object.metadata.name}}"
            data = {
              spec = {
                limits = [{
                  type           = "Container"
                  default        = { memory = "1Gi" }
                  defaultRequest = { cpu = "200m", memory = "1Gi" }
                  max            = { memory = "16Gi" }
                }]
              }
            }
          }
        },
        # Tier 3-edge — Burstable QoS: request < limit to reduce scheduler pressure
        {
          name = "limitrange-tier-3-edge"
          match = {
            any = [{ resources = { kinds = ["Namespace"], selector = { matchLabels = { tier = "3-edge" } } } }]
          }
          exclude = {
            any = [{ resources = { selector = { matchLabels = { "resource-governance/custom-limitrange" = "true" } } } }]
          }
          generate = {
            synchronize = true
            apiVersion  = "v1"
            kind        = "LimitRange"
            name        = "tier-defaults"
            namespace   = "{{request.object.metadata.name}}"
            data = {
              spec = {
                limits = [{
                  type           = "Container"
                  default        = { memory = "192Mi" }
                  defaultRequest = { cpu = "50m", memory = "96Mi" }
                  max            = { memory = "4Gi" }
                }]
              }
            }
          }
        },
        # Tier 4-aux — Burstable QoS: request < limit to reduce scheduler pressure
        {
          name = "limitrange-tier-4-aux"
          match = {
            any = [{ resources = { kinds = ["Namespace"], selector = { matchLabels = { tier = "4-aux" } } } }]
          }
          exclude = {
            any = [{ resources = { selector = { matchLabels = { "resource-governance/custom-limitrange" = "true" } } } }]
          }
          generate = {
            synchronize = true
            apiVersion  = "v1"
            kind        = "LimitRange"
            name        = "tier-defaults"
            namespace   = "{{request.object.metadata.name}}"
            data = {
              spec = {
                limits = [{
                  type           = "Container"
                  default        = { memory = "256Mi" }
                  defaultRequest = { cpu = "50m", memory = "64Mi" }
                  max            = { memory = "4Gi" }
                }]
              }
            }
          }
        },
        # Fallback: namespaces without a tier label get aux-level defaults
        # requests = limits to prevent memory overcommit (2026-03-14 node2 OOM incident)
        {
          name = "limitrange-no-tier-fallback"
          match = {
            any = [{ resources = { kinds = ["Namespace"] } }]
          }
          exclude = {
            any = [
              { resources = { selector = { matchExpressions = [{ key = "tier", operator = "Exists" }] } } },
              { resources = { namespaces = ["kube-system", "metallb-system", "kyverno", "calico-system", "calico-apiserver"] } }
            ]
          }
          generate = {
            synchronize = true
            apiVersion  = "v1"
            kind        = "LimitRange"
            name        = "tier-defaults"
            namespace   = "{{request.object.metadata.name}}"
            data = {
              spec = {
                limits = [{
                  type           = "Container"
                  default        = { memory = "128Mi" }
                  defaultRequest = { cpu = "50m", memory = "128Mi" }
                  max            = { memory = "2Gi" }
                }]
              }
            }
          }
        },
      ]
    }
  }
}

# -----------------------------------------------------------------------------
# Layer 3: ResourceQuotas (Kyverno Generate)
# -----------------------------------------------------------------------------
# Creates a ResourceQuota in each namespace based on its tier label.
# Sets hard ceiling on total namespace resource consumption.
# Namespaces with label resource-governance/custom-quota=true are excluded.
#
# IMPORTANT: LimitRange (Layer 2) must exist before ResourceQuota takes effect,
# because ResourceQuota requires all pods to have resource requests set.
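# Opt-out sketch (hypothetical namespace, shown only as a comment): a stack
# that manages its own quota keeps the generated one away by labelling its
# namespace with the exclude label used below:
#
#   labels = { "resource-governance/custom-quota" = "true" }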
resource "kubernetes_manifest" "generate_resourcequota_by_tier" {
|
||||
depends_on = [kubernetes_manifest.generate_limitrange_by_tier]
|
||||
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "generate-resourcequota-by-tier"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Generate ResourceQuota by Tier"
|
||||
"policies.kyverno.io/description" = "Creates tier-appropriate ResourceQuota in namespaces based on their tier label. Excludes namespaces with resource-governance/custom-quota label."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
generateExisting = true
|
||||
rules = [
|
||||
# Tier 0-core
|
||||
{
|
||||
name = "quota-tier-0-core"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Namespace"]
|
||||
selector = {
|
||||
matchLabels = {
|
||||
tier = "0-core"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
selector = {
|
||||
matchLabels = {
|
||||
"resource-governance/custom-quota" = "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
generate = {
|
||||
synchronize = true
|
||||
apiVersion = "v1"
|
||||
kind = "ResourceQuota"
|
||||
name = "tier-quota"
|
||||
namespace = "{{request.object.metadata.name}}"
|
||||
data = {
|
||||
spec = {
|
||||
hard = {
|
||||
"requests.cpu" = "8"
|
||||
"requests.memory" = "8Gi"
|
||||
"limits.memory" = "64Gi"
|
||||
pods = "100"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
# Tier 1-cluster
|
||||
{
|
||||
name = "quota-tier-1-cluster"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Namespace"]
|
||||
selector = {
|
||||
matchLabels = {
|
||||
tier = "1-cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
selector = {
|
||||
matchLabels = {
|
||||
"resource-governance/custom-quota" = "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
generate = {
|
||||
synchronize = true
|
||||
apiVersion = "v1"
|
||||
kind = "ResourceQuota"
|
||||
name = "tier-quota"
|
||||
namespace = "{{request.object.metadata.name}}"
|
||||
data = {
|
||||
spec = {
|
||||
hard = {
|
||||
"requests.cpu" = "4"
|
||||
"requests.memory" = "4Gi"
|
||||
"limits.memory" = "32Gi"
|
||||
pods = "30"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
# Tier 2-gpu
|
||||
{
|
||||
name = "quota-tier-2-gpu"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Namespace"]
|
||||
selector = {
|
||||
matchLabels = {
|
||||
tier = "2-gpu"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
selector = {
|
||||
matchLabels = {
|
||||
"resource-governance/custom-quota" = "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
generate = {
|
||||
synchronize = true
|
||||
apiVersion = "v1"
|
||||
kind = "ResourceQuota"
|
||||
name = "tier-quota"
|
||||
namespace = "{{request.object.metadata.name}}"
|
||||
data = {
|
||||
spec = {
|
||||
hard = {
|
||||
"requests.cpu" = "8"
|
||||
"requests.memory" = "8Gi"
|
||||
"limits.memory" = "32Gi"
|
||||
pods = "40"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
# Tier 3-edge
|
||||
{
|
||||
name = "quota-tier-3-edge"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Namespace"]
|
||||
selector = {
|
||||
matchLabels = {
|
||||
tier = "3-edge"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
selector = {
|
||||
matchLabels = {
|
||||
"resource-governance/custom-quota" = "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
generate = {
|
||||
synchronize = true
|
||||
apiVersion = "v1"
|
||||
kind = "ResourceQuota"
|
||||
name = "tier-quota"
|
||||
namespace = "{{request.object.metadata.name}}"
|
||||
data = {
|
||||
spec = {
|
||||
hard = {
|
||||
"requests.cpu" = "4"
|
||||
"requests.memory" = "4Gi"
|
||||
"limits.memory" = "32Gi"
|
||||
pods = "30"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
# Tier 4-aux
|
||||
{
|
||||
name = "quota-tier-4-aux"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Namespace"]
|
||||
selector = {
|
||||
matchLabels = {
|
||||
tier = "4-aux"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
selector = {
|
||||
matchLabels = {
|
||||
"resource-governance/custom-quota" = "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
generate = {
|
||||
synchronize = true
|
||||
apiVersion = "v1"
|
||||
kind = "ResourceQuota"
|
||||
name = "tier-quota"
|
||||
namespace = "{{request.object.metadata.name}}"
|
||||
data = {
|
||||
spec = {
|
||||
hard = {
|
||||
"requests.cpu" = "2"
|
||||
"requests.memory" = "2Gi"
|
||||
"limits.memory" = "16Gi"
|
||||
pods = "20"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Layer 4: PriorityClassName Injection (Kyverno Mutate)
|
||||
# -----------------------------------------------------------------------------
|
||||
# Automatically sets priorityClassName on Pods based on their namespace's tier label.
|
||||
# Skips pods that already have a priorityClassName set.
|
||||
# Uses namespaceSelector instead of API calls — no round-trip to the API server.
|
||||
|
||||
resource "kubernetes_manifest" "mutate_priority_from_tier" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "inject-priority-class-from-tier"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Inject PriorityClass from Tier"
|
||||
"policies.kyverno.io/description" = "Sets priorityClassName on Pods based on the namespace tier label. Skips pods that already have a priorityClassName."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
rules = [for tier in local.governance_tiers : {
|
||||
name = "inject-priority-${tier}"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
operations = ["CREATE"]
|
||||
namespaceSelector = {
|
||||
matchLabels = {
|
||||
tier = tier
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
namespaces = local.excluded_namespaces
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
preconditions = {
|
||||
all = [
|
||||
{
|
||||
key = "{{request.object.spec.priorityClassName || ''}}"
|
||||
operator = "Equals"
|
||||
value = ""
|
||||
}
|
||||
]
|
||||
}
|
||||
mutate = {
|
||||
patchesJson6902 = yamlencode([
|
||||
{
|
||||
op = "remove"
|
||||
path = "/spec/priority"
|
||||
},
|
||||
{
|
||||
op = "remove"
|
||||
path = "/spec/preemptionPolicy"
|
||||
},
|
||||
{
|
||||
op = "add"
|
||||
path = "/spec/priorityClassName"
|
||||
value = "tier-${tier}"
|
||||
}
|
||||
])
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# --- ndots:2 injection ---
|
||||
# Kubernetes defaults to ndots:5, which causes 4 wasted NxDomain queries per
|
||||
# external DNS lookup (search domain expansion). This policy injects ndots:2
|
||||
# on all pods to reduce NxDomain flood while still allowing short-name service
|
||||
# resolution (e.g. "redis.redis" has 1 dot, so it still expands).
|
||||
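# Sketch of the effect inside a pod (hostnames are illustrative): the pod's
# /etc/resolv.conf gains "options ndots:2", so a name with two or more dots
# such as ci.viktorbarzin.me is sent straight to the upstream resolver, while
# a one-dot service name like redis.redis still tries the search domains first.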
resource "kubernetes_manifest" "mutate_ndots" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "inject-ndots"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Inject ndots:2 DNS Config"
|
||||
"policies.kyverno.io/description" = "Sets ndots:2 on all Pods to reduce NxDomain query flood from search domain expansion. Skips pods that already have ndots configured."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
rules = [
|
||||
{
|
||||
name = "inject-ndots-2"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
namespaces = ["kube-system", "metallb-system", "kyverno", "calico-system", "calico-apiserver"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
preconditions = {
|
||||
all = [
|
||||
{
|
||||
key = "{{ request.object.spec.dnsConfig.options || `[]` | [?name == 'ndots'] | length(@) }}"
|
||||
operator = "Equals"
|
||||
value = "0"
|
||||
}
|
||||
]
|
||||
}
|
||||
mutate = {
|
||||
patchStrategicMerge = {
|
||||
spec = {
|
||||
dnsConfig = {
|
||||
options = [
|
||||
{
|
||||
name = "ndots"
|
||||
value = "2"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Layer 5: GPU Workload Priority Override (Kyverno Mutate)
|
||||
# -----------------------------------------------------------------------------
|
||||
# Overrides the tier-based priorityClassName with gpu-workload for pods that
|
||||
# actually request nvidia.com/gpu resources. This ensures GPU pods can preempt
|
||||
# non-GPU pods on the GPU node, regardless of namespace tier.
|
||||
# Runs after Layer 4 (tier injection), so it overrides the tier-based priority.
|
||||
|
||||
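# Illustrative trigger (hypothetical container, not part of this stack): a pod
# whose container declares
#
#   resources {
#     limits = {
#       "nvidia.com/gpu" = 1
#     }
#   }
#
# matches the precondition below and gets priorityClassName rewritten to
# "gpu-workload" (priority 1200000), above every tier-* class.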
resource "kubernetes_manifest" "mutate_gpu_priority" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "inject-gpu-workload-priority"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Inject GPU Workload Priority"
|
||||
"policies.kyverno.io/description" = "Overrides priorityClassName to gpu-workload for pods requesting nvidia.com/gpu resources. Runs after tier-based injection."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
rules = [
|
||||
{
|
||||
name = "gpu-priority-override"
|
||||
match = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
operations = ["CREATE"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
exclude = {
|
||||
any = [
|
||||
{
|
||||
resources = {
|
||||
namespaces = local.excluded_namespaces
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
preconditions = {
|
||||
any = [
|
||||
{
|
||||
key = "{{ request.object.spec.containers[].resources.requests.\"nvidia.com/gpu\" || '' }}"
|
||||
operator = "NotEquals"
|
||||
value = ""
|
||||
},
|
||||
{
|
||||
key = "{{ request.object.spec.containers[].resources.limits.\"nvidia.com/gpu\" || '' }}"
|
||||
operator = "NotEquals"
|
||||
value = ""
|
||||
}
|
||||
]
|
||||
}
|
||||
mutate = {
|
||||
patchesJson6902 = yamlencode([
|
||||
{
|
||||
op = "replace"
|
||||
path = "/spec/priorityClassName"
|
||||
value = "gpu-workload"
|
||||
},
|
||||
{
|
||||
op = "replace"
|
||||
path = "/spec/priority"
|
||||
value = 1200000
|
||||
},
|
||||
{
|
||||
op = "replace"
|
||||
path = "/spec/preemptionPolicy"
|
||||
value = "PreemptLowerPriority"
|
||||
}
|
||||
])
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,294 +0,0 @@
# =============================================================================
# Pod Security Policies (Audit Mode)
# =============================================================================
# Kyverno validate policies for pod security standards.
# All policies start in Audit mode - violations are logged but not blocked.

resource "kubernetes_manifest" "policy_deny_privileged" {
  manifest = {
    apiVersion = "kyverno.io/v1"
    kind       = "ClusterPolicy"
    metadata = {
      name = "deny-privileged-containers"
      annotations = {
        "policies.kyverno.io/title"       = "Deny Privileged Containers"
        "policies.kyverno.io/category"    = "Pod Security"
        "policies.kyverno.io/severity"    = "high"
        "policies.kyverno.io/description" = "Privileged containers have full host access. Deny unless explicitly exempted."
      }
    }
    spec = {
      validationFailureAction = "Audit"
      background              = true
      rules = [{
        name = "deny-privileged"
        match = {
          any = [{
            resources = {
              kinds = ["Pod"]
            }
          }]
        }
        exclude = {
          any = [{
            resources = {
              namespaces = ["frigate", "nvidia", "monitoring"]
            }
          }]
        }
        validate = {
          message = "Privileged containers are not allowed. Use specific capabilities instead."
          pattern = {
            spec = {
              containers = [{
                "=(securityContext)" = {
                  "=(privileged)" = false
                }
              }]
              "=(initContainers)" = [{
                "=(securityContext)" = {
                  "=(privileged)" = false
                }
              }]
            }
          }
        }
      }]
    }
  }

  depends_on = [helm_release.kyverno]
}

resource "kubernetes_manifest" "policy_deny_host_namespaces" {
  manifest = {
    apiVersion = "kyverno.io/v1"
    kind       = "ClusterPolicy"
    metadata = {
      name = "deny-host-namespaces"
      annotations = {
        "policies.kyverno.io/title"       = "Deny Host Namespaces"
        "policies.kyverno.io/category"    = "Pod Security"
        "policies.kyverno.io/severity"    = "high"
        "policies.kyverno.io/description" = "Sharing host namespaces enables container escapes. Deny hostNetwork, hostPID, hostIPC."
      }
    }
    spec = {
      validationFailureAction = "Audit"
      background              = true
      rules = [{
        name = "deny-host-namespaces"
        match = {
          any = [{
            resources = {
              kinds = ["Pod"]
            }
          }]
        }
        exclude = {
          any = [{
            resources = {
              namespaces = ["frigate", "monitoring"]
            }
          }]
        }
        validate = {
          message = "Host namespaces (hostNetwork, hostPID, hostIPC) are not allowed."
          pattern = {
            spec = {
              "=(hostNetwork)" = false
              "=(hostPID)"     = false
              "=(hostIPC)"     = false
            }
          }
        }
      }]
    }
  }

  depends_on = [helm_release.kyverno]
}

resource "kubernetes_manifest" "policy_restrict_capabilities" {
  manifest = {
    apiVersion = "kyverno.io/v1"
    kind       = "ClusterPolicy"
    metadata = {
      name = "restrict-sys-admin"
      annotations = {
        "policies.kyverno.io/title"       = "Restrict SYS_ADMIN Capability"
        "policies.kyverno.io/category"    = "Pod Security"
        "policies.kyverno.io/severity"    = "high"
        "policies.kyverno.io/description" = "SYS_ADMIN is nearly equivalent to root. Restrict to explicitly exempted namespaces."
      }
    }
    spec = {
      validationFailureAction = "Audit"
      background              = true
      rules = [{
        name = "restrict-sys-admin"
        match = {
          any = [{
            resources = {
              kinds = ["Pod"]
            }
          }]
        }
        exclude = {
          any = [{
            resources = {
              namespaces = ["nvidia", "monitoring"]
            }
          }]
        }
        validate = {
          message = "Adding SYS_ADMIN capability is not allowed."
          deny = {
            conditions = {
              any = [{
                key      = "{{ request.object.spec.containers[].securityContext.capabilities.add[] || `[]` }}"
                operator = "AnyIn"
                value    = ["SYS_ADMIN"]
              }]
            }
          }
        }
      }]
    }
  }

  depends_on = [helm_release.kyverno]
}

# =============================================================================
# Image Pull Policy Governance
# =============================================================================
# Mutate imagePullPolicy to IfNotPresent for all containers with pinned tags
# (non-:latest). This prevents pods from getting stuck in ImagePullBackOff
# when the pull-through cache at 10.0.20.10 has transient failures.
# For :latest or untagged images, set to Always so stale images don't persist.
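# How the two rules below classify images (illustrative image names, not
# necessarily ones running in this cluster):
#   ghcr.io/foo/bar:v1.2.3   -> pinned tag   -> imagePullPolicy: IfNotPresent
#   docker.io/library/nginx  -> untagged     -> imagePullPolicy: Always
#   myapp:latest             -> :latest tag  -> imagePullPolicy: Always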
resource "kubernetes_manifest" "policy_set_image_pull_policy" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "set-image-pull-policy"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Set Image Pull Policy"
|
||||
"policies.kyverno.io/category" = "Best Practices"
|
||||
"policies.kyverno.io/severity" = "medium"
|
||||
"policies.kyverno.io/description" = "Set imagePullPolicy to IfNotPresent for pinned tags and Always for :latest to prevent ImagePullBackOff from transient cache failures."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
background = false
|
||||
rules = [
|
||||
{
|
||||
name = "set-ifnotpresent-for-pinned-tags"
|
||||
match = {
|
||||
any = [{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
}
|
||||
}]
|
||||
}
|
||||
mutate = {
|
||||
foreach = [{
|
||||
list = "request.object.spec.containers"
|
||||
preconditions = {
|
||||
all = [{
|
||||
key = "{{ ends_with(element.image, ':latest') || !contains(element.image, ':') }}"
|
||||
operator = "Equals"
|
||||
value = false
|
||||
}]
|
||||
}
|
||||
patchStrategicMerge = {
|
||||
spec = {
|
||||
containers = [{
|
||||
name = "{{ element.name }}"
|
||||
imagePullPolicy = "IfNotPresent"
|
||||
}]
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
},
|
||||
{
|
||||
name = "set-always-for-latest"
|
||||
match = {
|
||||
any = [{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
}
|
||||
}]
|
||||
}
|
||||
mutate = {
|
||||
foreach = [{
|
||||
list = "request.object.spec.containers"
|
||||
preconditions = {
|
||||
all = [{
|
||||
key = "{{ ends_with(element.image, ':latest') || !contains(element.image, ':') }}"
|
||||
operator = "Equals"
|
||||
value = true
|
||||
}]
|
||||
}
|
||||
patchStrategicMerge = {
|
||||
spec = {
|
||||
containers = [{
|
||||
name = "{{ element.name }}"
|
||||
imagePullPolicy = "Always"
|
||||
}]
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
depends_on = [helm_release.kyverno]
|
||||
}
|
||||
|
||||
resource "kubernetes_manifest" "policy_require_trusted_registries" {
|
||||
manifest = {
|
||||
apiVersion = "kyverno.io/v1"
|
||||
kind = "ClusterPolicy"
|
||||
metadata = {
|
||||
name = "require-trusted-registries"
|
||||
annotations = {
|
||||
"policies.kyverno.io/title" = "Require Trusted Image Registries"
|
||||
"policies.kyverno.io/category" = "Pod Security"
|
||||
"policies.kyverno.io/severity" = "medium"
|
||||
"policies.kyverno.io/description" = "Images must come from trusted registries to prevent supply chain attacks."
|
||||
}
|
||||
}
|
||||
spec = {
|
||||
validationFailureAction = "Audit"
|
||||
background = true
|
||||
rules = [{
|
||||
name = "validate-registries"
|
||||
match = {
|
||||
any = [{
|
||||
resources = {
|
||||
kinds = ["Pod"]
|
||||
}
|
||||
}]
|
||||
}
|
||||
validate = {
|
||||
message = "Images must be from trusted registries (docker.io, ghcr.io, quay.io, registry.k8s.io, or local cache)."
|
||||
pattern = {
|
||||
spec = {
|
||||
containers = [{
|
||||
image = "docker.io/* | ghcr.io/* | quay.io/* | registry.k8s.io/* | 10.0.20.10* | */*"
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
depends_on = [helm_release.kyverno]
|
||||
}
|
||||
|
@@ -1,5 +0,0 @@
firmly-gerardo-generated@viktorbarzin.me me@viktorbarzin.me
closely-keith-generated@viktorbarzin.me vbarzin@gmail.com
literally-paolo-generated@viktorbarzin.me viktorbarzin@fb.com
hastily-stefanie-generated@viktorbarzin.me elliestamenova@gmail.com
vaultwarden@viktorbarzin.me me@viktorbarzin.me
@@ -1,504 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "mailserver_accounts" {}
variable "postfix_account_aliases" {}
variable "opendkim_key" {}
variable "sasl_passwd" {} # For SendGrid, i.e. the relayhost
variable "nfs_server" { type = string }

resource "kubernetes_namespace" "mailserver" {
  metadata {
    name = "mailserver"
    labels = {
      tier = var.tier
    }
    # connecting via localhost does not seem to work?
    # labels = {
    #   "istio-injection" : "enabled"
    # }
  }
}

module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.mailserver.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_config_map" "mailserver_env_config" {
  metadata {
    name      = "mailserver.env.config"
    namespace = kubernetes_namespace.mailserver.metadata[0].name
    labels = {
      app = "mailserver"
    }
    annotations = {
      "reloader.stakater.com/match" = "true"
    }
  }

  data = {
    DMS_DEBUG = "0"
    # LOG_LEVEL = "debug"
    ENABLE_CLAMAV       = "0"
    ENABLE_AMAVIS       = "0"
    ENABLE_FAIL2BAN     = "0"
    ENABLE_FETCHMAIL    = "0"
    ENABLE_POSTGREY     = "0"
    ENABLE_SASLAUTHD    = "0"
    ENABLE_SPAMASSASSIN = "0"
    ENABLE_RSPAMD       = "1"
    ENABLE_OPENDKIM     = "0"
    ENABLE_OPENDMARC    = "0"
    RSPAMD_LEARN        = "1"
    ENABLE_SRS          = "1"
    FETCHMAIL_POLL      = "120"
    ONE_DIR             = "1"
    OVERRIDE_HOSTNAME   = "mail.viktorbarzin.me"

    POSTFIX_MESSAGE_SIZE_LIMIT             = 1024 * 1024 * 200 # 200 MB
    POSTFIX_REJECT_UNKNOWN_CLIENT_HOSTNAME = "1"
    # TLS_LEVEL = "intermediate"
    # DEFAULT_RELAY_HOST = "[smtp.sendgrid.net]:587"
    DEFAULT_RELAY_HOST = "[smtp.eu.mailgun.org]:587"
    SPOOF_PROTECTION   = "1"
    SSL_TYPE           = "manual"
    SSL_CERT_PATH      = "/tmp/ssl/tls.crt"
    SSL_KEY_PATH       = "/tmp/ssl/tls.key"
  }
}

resource "kubernetes_config_map" "mailserver_config" {
  metadata {
    name      = "mailserver.config"
    namespace = kubernetes_namespace.mailserver.metadata[0].name

    labels = {
      app = "mailserver"
    }
    annotations = {
      "reloader.stakater.com/match" = "true"
    }
  }

  data = {
    # Actual mail settings
    "postfix-accounts.cf" = join("\n", [for user, pass in var.mailserver_accounts : "${user}|${bcrypt(pass, 6)}"])
    "postfix-main.cf"     = var.postfix_cf
    "postfix-virtual.cf"  = format("%s%s", var.postfix_account_aliases, file("${path.module}/extra/aliases.txt"))

    KeyTable      = "mail._domainkey.viktorbarzin.me viktorbarzin.me:mail:/etc/opendkim/keys/viktorbarzin.me-mail.key\n"
    SigningTable  = "*@viktorbarzin.me mail._domainkey.viktorbarzin.me\n"
    TrustedHosts  = "127.0.0.1\nlocalhost\n"
    "sasl_passwd" = var.sasl_passwd
    # Rspamd DKIM signing configuration
    "dkim_signing.conf" = <<-EOF
      enabled = true;
      sign_authenticated = true;
      sign_local = true;
      use_domain = "header";
      use_redis = false;
      use_esld = true;
      selector = "mail";
      path = "/tmp/docker-mailserver/rspamd/dkim/viktorbarzin.me/mail.private";
      domain {
        viktorbarzin.me {
          path = "/tmp/docker-mailserver/rspamd/dkim/viktorbarzin.me/mail.private";
          selector = "mail";
        }
      }
    EOF
    fail2ban_conf = <<-EOF
      [DEFAULT]

      #logtarget = /var/log/fail2ban.log
      logtarget = SYSOUT
    EOF
  }
  # bcrypt() salts differently on every run, so these hashes change on each
  # plan and would constantly rewrite the secret. Ignore them instead. The
  # alternatives would be 1. deriving consistent hashes per password or
  # 2. finding a way to ignore_changes per password.
  lifecycle {
    ignore_changes = [data["postfix-accounts.cf"]]
  }
}

# resource "kubernetes_config_map" "user_patches" {
#   metadata {
#     name      = "user-patches"
#     namespace = kubernetes_namespace.mailserver.metadata[0].name
#     labels = {
#       "app" = "mailserver"
#     }
#   }

#   data = {
#     user_patches = <<EOF
# #!/bin/bash
# cp -f /tmp/dovecot.key /etc/dovecot/ssl/dovecot.key
# cp -f /tmp/dovecot.crt /etc/dovecot/ssl/dovecot.pem
# EOF
#   }
# }

resource "kubernetes_secret" "opendkim_key" {
  metadata {
    name      = "mailserver.opendkim.key"
    namespace = kubernetes_namespace.mailserver.metadata[0].name
    labels = {
      "app" = "mailserver"
    }
  }
  type = "Opaque"
  data = {
    "viktorbarzin.me-mail.key" = var.opendkim_key
  }
}


module "nfs_data" {
  source     = "../../../../modules/kubernetes/nfs_volume"
  name       = "mailserver-data"
  namespace  = kubernetes_namespace.mailserver.metadata[0].name
  nfs_server = var.nfs_server
  nfs_path   = "/mnt/main/mailserver"
}

resource "kubernetes_deployment" "mailserver" {
  metadata {
    name      = "mailserver"
    namespace = kubernetes_namespace.mailserver.metadata[0].name
    labels = {
      "app" = "mailserver"
      tier  = var.tier
    }
    annotations = {
      "reloader.stakater.com/search" = "true"
    }
  }
  spec {
    replicas = "1"
    strategy {
      type = "Recreate"
    }
    selector {
      match_labels = {
        "app" = "mailserver"
      }
    }
    template {
      metadata {
        annotations = {
          # "diun.enable" = "true"
        }
        labels = {
          "app"  = "mailserver"
          "role" = "mail"
        }
      }
      spec {
        container {
          name              = "docker-mailserver"
          image             = "docker.io/mailserver/docker-mailserver:15.0.0"
          image_pull_policy = "IfNotPresent"
          security_context {
            capabilities {
              add = ["NET_ADMIN"]
            }
          }

          lifecycle {
            post_start {
              exec {
                command = [
                  "postmap",
                  "/etc/postfix/sasl/passwd"
                  # "/bin/sh",
                  # "-c",
                  # "cp -f /tmp/user-patches.sh /tmp/docker-mailserver/user-patches.sh && chown root:root /var/log/mail && chmod 755 /var/log/mail",
                ]
              }
            }
          }

          volume_mount {
            name       = "config-tls"
            mount_path = "/tmp/ssl/tls.key"
            sub_path   = "tls.key"
            read_only  = true
          }
          volume_mount {
            name       = "config-tls"
            mount_path = "/tmp/ssl/tls.crt"
            sub_path   = "tls.crt"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/postfix-accounts.cf"
            sub_path   = "postfix-accounts.cf"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/postfix-main.cf"
            sub_path   = "postfix-main.cf"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/postfix-virtual.cf"
            sub_path   = "postfix-virtual.cf"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/fetchmail.cf"
            sub_path   = "fetchmail.cf"
            read_only  = true
          }
          # volume_mount {
          #   name       = "config"
          #   mount_path = "/tmp/docker-mailserver/dovecot.cf"
          #   sub_path   = "dovecot.cf"
          #   read_only  = true
          # }
          # volume_mount {
          #   name       = "user-patches"
          #   mount_path = "/tmp/user-patches.sh"
          #   sub_path   = "user-patches.sh"
          #   read_only  = true
          # }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/opendkim/SigningTable"
            sub_path   = "SigningTable"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/opendkim/KeyTable"
            sub_path   = "KeyTable"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/opendkim/TrustedHosts"
            sub_path   = "TrustedHosts"
            read_only  = true
          }
          volume_mount {
            name       = "opendkim-key"
            mount_path = "/tmp/docker-mailserver/opendkim/keys"
            read_only  = true
          }
          volume_mount {
            name       = "opendkim-key"
            mount_path = "/tmp/docker-mailserver/rspamd/dkim/viktorbarzin.me/mail.private"
            sub_path   = "viktorbarzin.me-mail.key"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/tmp/docker-mailserver/rspamd/override.d/dkim_signing.conf"
            sub_path   = "dkim_signing.conf"
            read_only  = true
          }
          volume_mount {
            name       = "data"
            mount_path = "/var/mail"
            sub_path   = "data"
          }
          volume_mount {
            name       = "data"
            mount_path = "/var/mail-state"
            sub_path   = "state"
          }
          volume_mount {
            name       = "data"
            mount_path = "/var/log/mail"
            sub_path   = "log"
          }
          volume_mount {
            name       = "var-run-dovecot"
            mount_path = "/var/run/dovecot"
          }
          volume_mount {
            name       = "config"
            mount_path = "/etc/postfix/sasl/passwd"
            sub_path   = "sasl_passwd"
            read_only  = true
          }
          volume_mount {
            name       = "config"
            mount_path = "/etc/fail2ban/fail2ban.local"
            sub_path   = "fail2ban_conf"
            read_only  = true
          }
          port {
            name           = "smtp"
            container_port = 25
            protocol       = "TCP"
          }
          port {
            name           = "smtp-secure"
            container_port = 465
            protocol       = "TCP"
          }
          port {
            name           = "smtp-auth"
            container_port = 587
            protocol       = "TCP"
          }
          port {
            name           = "imap-secure"
            container_port = 993
            protocol       = "TCP"
          }
          env_from {
            config_map_ref {
              name = "mailserver.env.config"
            }
          }

          resources {
            requests = {
              cpu    = "25m"
              memory = "512Mi"
            }
            limits = {
              memory = "512Mi"
            }
          }
        }

        container {
          name  = "dovecot-exporter"
          image = "viktorbarzin/dovecot_exporter:latest"
          command = [
            "/dovecot_exporter/exporter",
            "--dovecot.socket-path=/var/run/dovecot/stats-reader"
          ]
          image_pull_policy = "IfNotPresent"
          port {
            name           = "dovecotexporter"
            container_port = 9166
            protocol       = "TCP"
          }
          volume_mount {
            name       = "var-run-dovecot"
            mount_path = "/var/run/dovecot"
          }
          resources {
            requests = {
              cpu    = "10m"
              memory = "32Mi"
            }
            limits = {
              memory = "32Mi"
            }
          }
        }

        volume {
          name = "config"
          config_map {
            name = "mailserver.config"
          }
        }
        volume {
          name = "config-tls"
          secret {
            secret_name = var.tls_secret_name
          }
        }
        volume {
          name = "opendkim-key"
          secret {
            secret_name = "mailserver.opendkim.key"
          }
        }
        volume {
          name = "data"
          persistent_volume_claim {
            claim_name = module.nfs_data.claim_name
          }
          # iscsi {
          #   target_portal = "iscsi.viktorbarzin.lan:3260"
          #   iqn           = "iqn.2020-12.lan.viktorbarzin:storage:mailserver"
          #   lun           = 0
          #   fs_type       = "ext4"
          # }
        }
        # volume {
        #   name = "user-patches"
        #   config_map {
        #     name = "user-patches"
        #   }
        # }
        volume {
          name = "var-run-dovecot"
          empty_dir {}
        }
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "mailserver" {
  metadata {
    name      = "mailserver"
    namespace = kubernetes_namespace.mailserver.metadata[0].name

    labels = {
      app = "mailserver"
    }

    annotations = {
      "metallb.io/loadBalancerIPs" = "10.0.20.200"
      "metallb.io/allow-shared-ip" = "shared"
    }
  }

  spec {
    type                    = "LoadBalancer"
    external_traffic_policy = "Cluster"
    selector = {
      app = "mailserver"
    }

    port {
      name        = "smtp"
      protocol    = "TCP"
      port        = 25
      target_port = "smtp"
    }

    port {
      name        = "smtp-secure"
      protocol    = "TCP"
      port        = 465
      target_port = "smtp-secure"
    }

    port {
      name        = "smtp-auth"
      protocol    = "TCP"
      port        = 587
      target_port = "smtp-auth"
    }

    port {
      name        = "imap-secure"
      protocol    = "TCP"
      port        = 993
      target_port = "imap-secure"
    }
  }
}

|
@ -1,237 +0,0 @@
|
|||
variable "roundcube_db_password" {
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
variable "mysql_host" { type = string }
|
||||
|
||||
module "nfs_roundcube_html" {
|
||||
source = "../../../../modules/kubernetes/nfs_volume"
|
||||
name = "roundcubemail-html"
|
||||
namespace = kubernetes_namespace.mailserver.metadata[0].name
|
||||
nfs_server = var.nfs_server
|
||||
nfs_path = "/mnt/main/roundcubemail/html"
|
||||
}
|
||||
|
||||
module "nfs_roundcube_enigma" {
|
||||
source = "../../../../modules/kubernetes/nfs_volume"
|
||||
name = "roundcubemail-enigma"
|
||||
namespace = kubernetes_namespace.mailserver.metadata[0].name
|
||||
nfs_server = var.nfs_server
|
||||
nfs_path = "/mnt/main/roundcubemail/enigma"
|
||||
}
|
||||

# If you want to override settings mount this in /var/roundcube/config
# more info in https://github.com/roundcube/roundcubemail-docker?tab=readme-ov-file
# resource "kubernetes_config_map" "roundcubemail_config" {
#   metadata {
#     name      = "roundcubemail.config"
#     namespace = "mailserver"

#     labels = {
#       app = "mailserver"
#     }
#     annotations = {
#       "reloader.stakater.com/match" = "true"
#     }
#   }

#   data = {
#     # if you want to override things see https://github.com/roundcube/roundcubemail/blob/master/config/defaults.inc.php
#     "imap.php" = <<-EOF
#       <?php
#       $config['imap_host'] = 'ssl://mail.viktorbarzin.me:993';
#       ?>
#     EOF
#   }
# }


resource "kubernetes_deployment" "roundcubemail" {
  metadata {
    name      = "roundcubemail"
    namespace = "mailserver"
    labels = {
      "app" = "roundcubemail"
      tier  = var.tier
    }
    annotations = {
      "reloader.stakater.com/search" = "true"
    }
  }
  spec {
    replicas = "1"
    strategy {
      type = "RollingUpdate"
    }
    selector {
      match_labels = {
        "app" = "roundcubemail"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "roundcubemail"
        }
      }
      spec {
        container {
          name  = "roundcube"
          image = "roundcube/roundcubemail:1.6.13-apache"
          # Uncomment to mount additional settings
          # volume_mount {
          #   name       = "imap-config"
          #   mount_path = "/var/roundcube/config/imap.php"
          #   sub_path   = "imap.php"
          # }
          env {
            name  = "ROUNDCUBEMAIL_DEFAULT_HOST"
            value = "ssl://mail.viktorbarzin.me" # tls cert must be valid!
          }
          env {
            name  = "ROUNDCUBEMAIL_DEFAULT_PORT"
            value = "993"
          }
          env {
            name  = "ROUNDCUBEMAIL_SMTP_SERVER"
            value = "tls://mail.viktorbarzin.me" # tls cert must be valid!
          }

          env {
            name  = "ROUNDCUBEMAIL_SMTP_PORT"
            value = "587"
          }

          # DB settings
          env {
            name  = "ROUNDCUBEMAIL_DB_TYPE"
            value = "mysql"
          }
          env {
            name  = "ROUNDCUBEMAIL_DB_HOST"
            value = var.mysql_host
          }
          env {
            name  = "ROUNDCUBEMAIL_DB_USER"
            value = "roundcubemail"
          }
          env {
            name  = "ROUNDCUBEMAIL_DB_PASSWORD"
            value = var.roundcube_db_password
          }
          # Plugins
          env {
            name  = "ROUNDCUBEMAIL_COMPOSER_PLUGINS"
            value = "mmvi/twofactor_webauthn,texxasrulez/persistent_login,dsoares/rcguard"
          }
          env {
            name  = "ROUNDCUBEMAIL_PLUGINS"
            value = "attachment_reminder,database_attachments,enigma,twofactor_webauthn,persistent_login,rcguard"
          }

          env {
            name  = "ROUNDCUBEMAIL_SMTP_DEBUG"
            value = "false"
          }
          env {
            name  = "ROUNDCUBEMAIL_DEBUG_LEVEL"
            value = "1"
          }
          env {
            name = "ROUNDCUBEMAIL_LOG_DRIVER"
            # value = "file"
            value = "syslog"
          }
          port {
            name           = "web"
            container_port = 80
            protocol       = "TCP"
          }
          volume_mount {
            name       = "html"
            mount_path = "/var/www/html"
          }
          volume_mount {
            name       = "enigma"
            mount_path = "/var/roundcube/enigma"
          }
          resources {
            requests = {
              cpu    = "25m"
              memory = "192Mi"
            }
            limits = {
              memory = "192Mi"
            }
          }
        }

        # volume {
        #   name = "imap-config"
        #   config_map {
        #     name = "roundcubemail.config"
        #   }
        # }

        volume {
          name = "html"
          persistent_volume_claim {
            claim_name = module.nfs_roundcube_html.claim_name
          }
        }
        volume {
          name = "enigma"
          persistent_volume_claim {
            claim_name = module.nfs_roundcube_enigma.claim_name
          }
        }
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "roundcubemail" {
  metadata {
    name      = "roundcubemail"
    namespace = "mailserver"

    labels = {
      app = "roundcubemail"
    }
  }

  spec {
    selector = {
      app = "roundcubemail"
    }

    port {
      name     = "roundcube"
      protocol = "TCP"
      port     = 80
    }
  }
}

module "ingress" {
  source          = "../../../../modules/kubernetes/ingress_factory"
  namespace       = "mailserver"
  name            = "mail"
  service_name    = "roundcubemail"
  tls_secret_name = var.tls_secret_name
  rybbit_site_id  = "082f164faa7d"
  extra_annotations = {
    "gethomepage.dev/enabled"      = "true"
    "gethomepage.dev/name"         = "Roundcube Mail"
    "gethomepage.dev/description"  = "Webmail client"
    "gethomepage.dev/icon"         = "roundcube.png"
    "gethomepage.dev/group"        = "Other"
    "gethomepage.dev/pod-selector" = ""
  }
}

@@ -1,163 +0,0 @@
# this is appended and merged to the main postfix.cf
# see defaults - https://github.com/docker-mailserver/docker-mailserver/blob/master/target/postfix/main.cf
variable "postfix_cf" {
  default = <<EOT
#relayhost = [smtp.sendgrid.net]:587
relayhost = [smtp.eu.mailgun.org]:587
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl/passwd
smtp_sasl_security_options = noanonymous
smtp_sasl_tls_security_options = noanonymous
smtp_tls_security_level = encrypt
smtpd_tls_cert_file=/tmp/ssl/tls.crt
smtpd_tls_key_file=/tmp/ssl/tls.key
smtpd_use_tls=yes
header_size_limit = 4096000

# Debug mail tls
smtpd_tls_loglevel = 1
#smtpd_tls_ciphers = TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:!aNULL:!SEED:!CAMELLIA:!RSA+AES:!SHA1
#tls_medium_cipherlist = ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:!aNULL:!SEED:!CAMELLIA:!RSA+AES:!SHA1

# Rate limiting (brute-force protection)
smtpd_client_connection_rate_limit = 10
smtpd_client_message_rate_limit = 30
anvil_rate_time_unit = 60s
EOT
}
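
This heredoc only takes effect if it reaches the container: docker-mailserver merges a user-supplied postfix-main.cf from its config directory (/tmp/docker-mailserver) into the generated main.cf. A sketch of the likely wiring; the ConfigMap name here is hypothetical and the actual mount lives elsewhere in this stack:

# Sketch: ship the overrides as a ConfigMap key named postfix-main.cf.
resource "kubernetes_config_map" "postfix_overrides_example" {
  metadata {
    name      = "mailserver-postfix-overrides" # illustrative name
    namespace = "mailserver"
  }
  data = {
    "postfix-main.cf" = var.postfix_cf
  }
}
# The mailserver pod would then mount this key at
# /tmp/docker-mailserver/postfix-main.cf (config_map volume + sub_path).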

variable "postfix_cf_reference_DO_NOT_USE" {
  default = <<EOT
# See /usr/share/postfix/main.cf.dist for a commented, more complete version

smtpd_banner = $myhostname ESMTP $mail_name (Debian)
biff = no
append_dot_mydomain = no
readme_directory = no

# Basic configuration
# myhostname =
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost.$mydomain, localhost
mynetworks = 127.0.0.0/8 [::1]/128 [fe80::]/64
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = ipv4

# TLS parameters
smtpd_tls_cert_file=/tmp/ssl/tls.crt
smtpd_tls_key_file=/tmp/ssl/tls.key
#smtpd_tls_CAfile=
#smtp_tls_CAfile=
smtpd_tls_security_level = may
smtpd_use_tls=yes
smtpd_tls_loglevel = 1
smtp_tls_loglevel = 1
tls_ssl_options = NO_COMPRESSION
tls_high_cipherlist = ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS
tls_preempt_cipherlist = yes
smtpd_tls_protocols = !SSLv2,!SSLv3
smtp_tls_protocols = !SSLv2,!SSLv3
smtpd_tls_mandatory_ciphers = high
smtpd_tls_mandatory_protocols = !SSLv2,!SSLv3
smtpd_tls_exclude_ciphers = aNULL, LOW, EXP, MEDIUM, ADH, AECDH, MD5, DSS, ECDSA, CAMELLIA128, 3DES, CAMELLIA256, RSA+AES, eNULL
smtpd_tls_dh1024_param_file = /etc/postfix/dhparams.pem
smtpd_tls_CApath = /etc/ssl/certs
smtp_tls_CApath = /etc/ssl/certs

# Settings to prevent SPAM early
smtpd_helo_required = yes
smtpd_delay_reject = yes
smtpd_helo_restrictions = permit_mynetworks, reject_invalid_helo_hostname, permit
#smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
#smtpd_relay_restrictions = reject_sender_login_mismatch permit_sasl_authenticated permit_mynetworks defer_unauth_destination
smtpd_relay_restrictions = reject_sender_login_mismatch permit_sasl_authenticated permit_mynetworks defer_unauth_destination
smtpd_recipient_restrictions = permit_sasl_authenticated, reject_unauth_destination, reject_unauth_pipelining, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, reject_unknown_recipient_domain, reject_rbl_client bl.spamcop.net, permit_mynetworks
smtpd_client_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_unauth_destination, reject_unauth_pipelining
#smtpd_sender_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, permit_mynetworks, reject_unknown_sender_domain
smtpd_sender_restrictions = reject_sender_login_mismatch, reject_authenticated_sender_login_mismatch, reject_unknown_sender_domain, permit_sasl_authenticated, permit_mynetworks
disable_vrfy_command = yes

# Postscreen settings to drop zombies/open relays/spam early
#postscreen_dnsbl_action = enforce
postscreen_dnsbl_action = ignore
postscreen_dnsbl_sites = zen.spamhaus.org*2
    bl.mailspike.net
    b.barracudacentral.org*2
    bl.spameatingmonkey.net
    bl.spamcop.net
    dnsbl.sorbs.net
    psbl.surriel.com
    list.dnswl.org=127.0.[0..255].0*-2
    list.dnswl.org=127.0.[0..255].1*-3
    list.dnswl.org=127.0.[0..255].[2..3]*-4
postscreen_dnsbl_threshold = 3
postscreen_dnsbl_whitelist_threshold = -1
postscreen_greet_action = enforce
postscreen_bare_newline_action = enforce

# SASL
smtpd_sasl_auth_enable = no
#smtpd_sasl_auth_enable = yes
##smtpd_sasl_path = /var/spool/postfix/private/auth
#smtpd_sasl_path = /var/spool/postfix/private/smtpd
##smtpd_sasl_type = dovecot
#smtpd_sasl_type = dovecot
##smtpd_sasl_security_options = noanonymous
#smtpd_sasl_security_options = noanonymous
##smtpd_sasl_local_domain = $mydomain
##broken_sasl_auth_clients = yes
#broken_sasl_auth_clients = yes

# SMTP configuration
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl/passwd
smtp_sasl_security_options = noanonymous
smtp_sasl_tls_security_options = noanonymous
smtp_tls_security_level = encrypt
header_size_limit = 4096000
relayhost = [smtp.sendgrid.net]:587

# Mail directory
virtual_transport = lmtp:unix:/var/run/dovecot/lmtp
virtual_mailbox_domains = /etc/postfix/vhost
virtual_mailbox_maps = texthash:/etc/postfix/vmailbox
virtual_alias_maps = texthash:/etc/postfix/virtual

# Additional option for filtering
content_filter = smtp-amavis:[127.0.0.1]:10024

# Milters used by DKIM
milter_protocol = 6
milter_default_action = accept
dkim_milter = inet:localhost:8891
dmarc_milter = inet:localhost:8893
smtpd_milters = $dkim_milter,$dmarc_milter
non_smtpd_milters = $dkim_milter

# SPF policy settings
policyd-spf_time_limit = 3600

# Header checks for content inspection on receiving
header_checks = pcre:/etc/postfix/maps/header_checks.pcre

# Remove unwanted headers that reveal our privacy
smtp_header_checks = pcre:/etc/postfix/maps/sender_header_filter.pcre
myhostname = mail.viktorbarzin.me
mydomain = viktorbarzin.me
smtputf8_enable = no
message_size_limit = 20480000
sender_canonical_maps = tcp:localhost:10001
sender_canonical_classes = envelope_sender
recipient_canonical_maps = tcp:localhost:10002
recipient_canonical_classes = envelope_recipient,header_recipient
compatibility_level = 2
# enable_original_recipient = no # before uncommenting see https://serverfault.com/questions/661615/how-to-drop-orig-to-using-postfix-virtual-domains
always_add_missing_headers = yes

anvil_status_update_time = 5s
EOT
}

@@ -1,40 +0,0 @@
# Creates namespace and everything needed
# Do not use until https://github.com/colinwilson/terraform-kubernetes-metallb/issues/5 is solved
# module "metallb" {
#   source  = "colinwilson/metallb/kubernetes"
#   version = "0.1.7"
# }
variable "tier" { type = string }

resource "kubernetes_namespace" "metallb" {
  metadata {
    name = "metallb-system"
    labels = {
      app = "metallb"
      # "istio-injection" : "disabled"
      # tier = var.tier
    }
  }
}

module "metallb" {
  source     = "ViktorBarzin/metallb/kubernetes"
  version    = "0.1.5"
  depends_on = [kubernetes_namespace.metallb]
}

resource "kubernetes_config_map" "config" {
  metadata {
    name      = "config"
    namespace = kubernetes_namespace.metallb.metadata[0].name
  }
  data = {
    config = <<EOT
address-pools:
- name: default
  protocol: layer2
  addresses:
  - 10.0.20.200-10.0.20.220
EOT
  }
}
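
This deleted file still used the legacy ConfigMap-based MetalLB configuration. Since MetalLB v0.13 the same layer-2 pool is declared through CRDs instead; a sketch of the equivalent, assuming the MetalLB CRDs are installed (resource names are illustrative):

resource "kubernetes_manifest" "metallb_pool_example" {
  manifest = {
    apiVersion = "metallb.io/v1beta1"
    kind       = "IPAddressPool"
    metadata = {
      name      = "default"
      namespace = "metallb-system"
    }
    spec = {
      addresses = ["10.0.20.200-10.0.20.220"]
    }
  }
}

resource "kubernetes_manifest" "metallb_l2_example" {
  manifest = {
    apiVersion = "metallb.io/v1beta1"
    kind       = "L2Advertisement"
    metadata = {
      name      = "default"
      namespace = "metallb-system"
    }
    spec = {
      ipAddressPools = ["default"]
    }
  }
}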