[ci skip] Remove legacy files and orphaned modules

Delete 20 orphaned module directories and 3 stray files from
modules/kubernetes/ that are no longer referenced by any stack.
Remove 7 root-level legacy files including the empty tfstate,
27MB terraform zip, commented-out main.tf, and migration notes.
Clean up commented-out dockerhub_secret and oauth-proxy references
in blog, travel_blog, and city-guesser stacks. Remove stale
frigate config.yaml entry from .gitignore. Remove ephemeral
docs/plans/ directory.
Viktor Barzin 2026-02-22 15:23:27 +00:00
parent c7c7047f1c
commit 116c4d9c30
56 changed files with 2 additions and 9402 deletions
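For reference, a module is "referenced" when some stack declares it with a module block; the 20 removed directories had no such declaration left anywhere. A minimal sketch of what such a reference would look like (the stack-side wiring here is hypothetical):

# Hypothetical stack-level reference; the absence of any block like this
# across all stacks is what marked modules/kubernetes/authelia as orphaned.
module "authelia" {
  source          = "./modules/kubernetes/authelia"
  tls_secret_name = var.tls_secret_name
}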

View file

@@ -1,178 +0,0 @@
variable "tls_secret_name" {}
resource "kubernetes_namespace" "authelia" {
metadata {
name = "authelia"
labels = {
"istio-injection" : "disabled"
}
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.authelia.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "helm_release" "authelia" {
namespace = kubernetes_namespace.authelia.metadata[0].name
name = "authelia"
atomic = true
repository = "https://charts.authelia.com"
chart = "authelia"
version = "0.10.49"
depends_on = [kubernetes_namespace.authelia]
values = [templatefile("${path.module}/values.yaml", {})]
}
# resource "kubernetes_config_map" "configuration" {
# metadata {
# name = "configuration"
# namespace = kubernetes_namespace.authelia.metadata[0].name
# labels = {
# app = "configuration"
# }
# annotations = {
# "reloader.stakater.com/match" = "true"
# }
# }
# data = {
# # "configuration.yml" = yamldecode(file("${path.module}/configuration.yml"))
# "configuration.yml" = file("${path.module}/configuration.yml")
# "users_database.yml" = file("${path.module}/users_database.yml")
# }
# }
# resource "kubernetes_deployment" "authelia" {
# metadata {
# name = "authelia"
# namespace = kubernetes_namespace.authelia.metadata[0].name
# labels = {
# app = "authelia"
# }
# annotations = {
# "reloader.stakater.com/search" = "true"
# }
# }
# spec {
# replicas = 1
# selector {
# match_labels = {
# app = "authelia"
# }
# }
# template {
# metadata {
# labels = {
# app = "authelia"
# }
# }
# spec {
# container {
# image = "authelia/authelia:4.38"
# name = "authelia"
# # command = ["tail", "-f", "/etc/passwd"]
# port {
# container_port = 9091
# }
# port {
# container_port = 8080
# }
# volume_mount {
# name = "config"
# # mount_path = "/etc/authelia/configuration.yml"
# mount_path = "/config/configuration.yml"
# sub_path = "configuration.yml"
# }
# volume_mount {
# name = "users-database"
# # mount_path = "/etc/authelia/users_database.yml"
# mount_path = "/config/users_database.yml"
# sub_path = "users_database.yml"
# }
# }
# volume {
# name = "config"
# config_map {
# name = "configuration"
# }
# }
# volume {
# name = "users-database"
# config_map {
# name = "configuration"
# }
# }
# }
# }
# }
# }
# resource "kubernetes_service" "authelia" {
# metadata {
# name = "authelia"
# namespace = kubernetes_namespace.authelia.metadata[0].name
# labels = {
# "app" = "authelia"
# }
# }
# spec {
# selector = {
# app = "authelia"
# }
# port {
# name = "http"
# port = 80
# protocol = "TCP"
# # target_port = 8080
# target_port = 9091
# }
# }
# }
# resource "kubernetes_ingress_v1" "authelia" {
# metadata {
# name = "authelia"
# namespace = kubernetes_namespace.authelia.metadata[0].name
# annotations = {
# "kubernetes.io/ingress.class" = "nginx"
# # "nginx.ingress.kubernetes.io/affinity" = "cookie"
# # "nginx.ingress.kubernetes.io/auth-tls-verify-client" = "on"
# # "nginx.ingress.kubernetes.io/auth-tls-secret" = "default/ca-secret"
# # "nginx.ingress.kubernetes.io/auth-url" : "https://oauth2.viktorbarzin.me/oauth2/auth"
# # "nginx.ingress.kubernetes.io/auth-signin" : "https://oauth2.viktorbarzin.me/oauth2/start?rd=/redirect/$http_host$escaped_request_uri"
# }
# }
# spec {
# tls {
# hosts = ["auth.viktorbarzin.me"]
# secret_name = var.tls_secret_name
# }
# rule {
# host = "auth.viktorbarzin.me"
# http {
# path {
# path = "/"
# backend {
# service {
# name = "authelia"
# port {
# number = 80
# }
# }
# }
# }
# }
# }
# }
# }

View file

@@ -1,10 +0,0 @@
users:
authelia:
disabled: false
displayname: "Viktor"
# Password is authelia
password: "$6$rounds=50000$BpLnfgDsc2WD8F2q$Zis.ixdg9s/UOJYrs56b5QEZFiZECu0qZVNsIYxBaNJ7ucIL.nlxVCT5tqh8KHG8X4tlwCFm5r6NTOZZ5qRFN/" # yamllint disable-line rule:line-length
email: me@viktorbarzin.me
groups:
- admins
- dev

View file

@@ -1,24 +0,0 @@
configMap:
session:
cookies:
- domain: 'authelia.viktorbarzin.me'
authelia_url: 'https://authelia.viktorbarzin.me'
storage:
local:
path: '/config/db.sqlite3'
theme: light
# Fix 1: access_control (addresses the startup warning)
access_control:
default_policy: 'one_factor' # Change to 'two_factor' once you have 2FA set up
rules:
- domain: "*.viktorbarzin.me"
policy: one_factor
# Fix 2: authentication_backend (where users are stored)
authentication_backend:
file:
path: /config/users.yml

View file

@@ -1,93 +0,0 @@
variable "named_conf_mounts" {}
variable "deployment_name" {}
resource "kubernetes_deployment" "bind" {
metadata {
name = var.deployment_name
namespace = "bind"
labels = {
"app" = "bind"
"kubernetes.io/cluster-service" : "true"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = "3"
selector {
match_labels = {
"app" = var.deployment_name
}
}
template {
metadata {
labels = {
"app" = var.deployment_name
"kubernetes.io/cluster-service" : "true"
}
}
spec {
container {
name = "bind"
image = "resystit/bind9:latest"
image_pull_policy = "IfNotPresent"
port {
container_port = 53
protocol = "UDP"
}
volume_mount {
mount_path = "/etc/bind/named.conf"
sub_path = "named.conf"
name = "bindconf"
}
dynamic "volume_mount" {
for_each = [for m in var.named_conf_mounts :
{
name = m.name
mount_path = m.mount_path
sub_path = m.sub_path
}]
content {
name = volume_mount.value.name
mount_path = volume_mount.value.mount_path
sub_path = volume_mount.value.sub_path
}
}
volume_mount {
mount_path = "/etc/bind/db.viktorbarzin.me"
sub_path = "db.viktorbarzin.me"
name = "bindconf"
}
volume_mount {
mount_path = "/etc/bind/db.viktorbarzin.lan"
sub_path = "db.viktorbarzin.lan"
name = "bindconf"
}
volume_mount {
mount_path = "/etc/bind/db.181.191.213.in-addr.arpa"
sub_path = "db.181.191.213.in-addr.arpa"
name = "bindconf"
}
}
container {
name = "bind-exporter"
image = "prometheuscommunity/bind-exporter:latest"
image_pull_policy = "IfNotPresent"
port {
container_port = 9119
}
}
volume {
name = "bindconf"
config_map {
name = "bind-configmap"
}
}
}
}
}
}

View file

@@ -1,180 +0,0 @@
; additional bind records added via terraform automation
; entries are usually programmatically added to this file
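The module's main.tf (below) appends this file onto the generated zone before it is written into the bind ConfigMap; a minimal sketch of that concatenation (the local name is illustrative; the real config inlines the format() call in the ConfigMap data):

# Generated zone body plus the programmatically-managed extra records above.
locals {
  db_viktorbarzin_me_full = format(
    "%s%s",
    var.db_viktorbarzin_me,                      # generated zone records
    file("${path.module}/extra/viktorbarzin.me") # this extra-records file
  )
}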

View file

@@ -1,77 +0,0 @@
variable "db_viktorbarzin_me" {}
variable "db_viktorbarzin_lan" {}
variable "named_conf_options" {}
resource "kubernetes_namespace" "bind" {
metadata {
name = "bind"
}
}
resource "kubernetes_config_map" "bind_configmap" {
metadata {
name = "bind-configmap"
namespace = "bind"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"db.viktorbarzin.lan" = var.db_viktorbarzin_lan
"db.viktorbarzin.me" = format("%s%s", var.db_viktorbarzin_me, file("${path.module}/extra/viktorbarzin.me"))
"db.181.191.213.in-addr.arpa" = var.db_ptr
"named.conf" = var.named_conf
"named.conf.local" = var.named_conf_local
"named.conf.options" = var.named_conf_options
"public-named.conf.local" = var.public_named_conf_local
"public-named.conf.options" = var.public_named_conf_options
}
}
module "bind-local-deployment" {
source = "./deployment-factory"
deployment_name = "bind"
named_conf_mounts = [
{
"mount_path" = "/etc/bind/named.conf.local"
"sub_path" = "named.conf.local"
"name" = "bindconf"
},
{
mount_path = "/etc/bind/named.conf.options"
sub_path = "named.conf.options"
name = "bindconf"
}
]
}
module "bind-local-service" {
source = "./service-factory"
service_name = "bind"
port = 5354
}
module "bind-public-deployment" {
source = "./deployment-factory"
deployment_name = "bind-public"
named_conf_mounts = [
{
"mount_path" = "/etc/bind/named.conf.local"
"sub_path" = "public-named.conf.local"
"name" = "bindconf"
},
{
mount_path = "/etc/bind/named.conf.options"
sub_path = "public-named.conf.options"
name = "bindconf"
}
]
}
module "bind-public-service" {
source = "./service-factory"
service_name = "bind-public"
port = 10053
}

View file

@@ -1,28 +0,0 @@
variable "service_name" {}
variable "port" {}
resource "kubernetes_service" "bind" {
metadata {
name = var.service_name
namespace = "bind"
annotations = {
"metallb.universe.tf/allow-shared-ip" = "shared"
}
labels = {
"app" = var.service_name
}
}
spec {
type = "LoadBalancer"
external_traffic_policy = "Cluster"
selector = {
"app" = var.service_name
}
port {
name = "dns"
protocol = "UDP"
port = var.port
target_port = "53"
}
}
}

View file

@@ -1,98 +0,0 @@
variable "named_conf" {
default = <<EOT
// This is the primary configuration file for the BIND DNS server named.
//
// Please read /usr/share/doc/bind9/README.Debian.gz for information on the
// structure of BIND configuration files in Debian, *BEFORE* you customize
// this configuration file.
//
// If you are just adding zones, please do that in /etc/bind/named.conf.local
include "/etc/bind/named.conf.options";
include "/etc/bind/named.conf.local";
//include "/etc/bind/named.conf.default-zones";
EOT
}
variable "named_conf_local" {
default = <<EOT
//
// Do any local configuration here
//
// Consider adding the 1918 zones here, if they are not used in your
// organization
//include "/etc/bind/zones.rfc1918";
zone "viktorbarzin.me" {
type master;
file "/etc/bind/db.viktorbarzin.me";
};
zone "viktorbarzin.lan" {
type master;
file "/etc/bind/db.viktorbarzin.lan";
};
zone "181.191.213.in-addr.arpa" {
type master;
file "/etc/bind/db.181.191.213.in-addr.arpa";
};
EOT
}
variable "public_named_conf_local" {
default = <<EOT
//
// Do any local configuration here
//
// Consider adding the 1918 zones here, if they are not used in your
// organization
//include "/etc/bind/zones.rfc1918";
zone "viktorbarzin.me" {
type master;
file "/etc/bind/db.viktorbarzin.me";
};
zone "181.191.213.in-addr.arpa" {
type master;
file "/etc/bind/db.181.191.213.in-addr.arpa";
};
EOT
}
variable "public_named_conf_options" {
default = <<EOT
options {
querylog yes;
directory "/tmp/";
listen-on {
any;
};
dnssec-validation auto;
allow-recursion {
none;
};
};
EOT
}
variable "db_ptr" {
default = <<EOT
$TTL 86400
181.191.213.in-addr.arpa. IN SOA ns1.viktorbarzin.me. ns2.viktorbarzin.me. (
5 ; Serial
28800 ; Refresh
10 ; Retry
2419200 ; Expire
60 ) ; Negative Cache TTL
181.191.213.in-addr.arpa. IN NS ns1.viktorbarzin.me.
130.181.191.213.in-addr.arpa. IN PTR viktorbarzin.me.
;130 IN PTR viktorbarzin.me.
EOT
}

View file

@@ -1,107 +0,0 @@
variable "tls_secret_name" {}
resource "kubernetes_namespace" "discount-bandit" {
metadata {
name = "discount-bandit"
# labels = {
# "istio-injection" : "enabled"
# }
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.discount-bandit.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_deployment" "discount-bandit" {
metadata {
name = "discount-bandit"
namespace = kubernetes_namespace.discount-bandit.metadata[0].name
labels = {
app = "discount-bandit"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
selector {
match_labels = {
app = "discount-bandit"
}
}
template {
metadata {
labels = {
app = "discount-bandit"
}
}
spec {
container {
image = "cybrarist/discount-bandit:latest-amd64"
name = "discount-bandit"
env {
name = "DB_HOST"
value = "mysql.dbaas"
}
env {
name = "DB_DATABASE"
value = "discountbandit"
}
env {
name = "DB_USERNAME"
value = "discountbandit"
}
env {
name = "DB_PASSWORD"
value = ""
}
env {
name = "APP_URL"
value = "http://discount.viktorbarzin.me:80"
}
port {
container_port = 80
}
}
}
}
}
}
resource "kubernetes_service" "discount-bandit" {
metadata {
name = "discount-bandit"
namespace = kubernetes_namespace.discount-bandit.metadata[0].name
labels = {
"app" = "discount-bandit"
}
}
spec {
selector = {
app = "discount-bandit"
}
port {
name = "http"
target_port = 80
port = 80
protocol = "TCP"
}
}
}
module "ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.discount-bandit.metadata[0].name
name = "discount-bandit"
host = "discount"
tls_secret_name = var.tls_secret_name
}

View file

@@ -1,80 +0,0 @@
# variable "tls_secret_name" {}
resource "kubernetes_namespace" "dnscat2" {
metadata {
name = "dnscat2"
labels = {
"istio-injection" : "disabled"
}
}
}
# module "tls_secret" {
# source = "../setup_tls_secret"
# namespace = kubernetes_namespace.dnscat2.metadata[0].name
# tls_secret_name = var.tls_secret_name
# }
resource "kubernetes_deployment" "dnscat2" {
metadata {
name = "dnscat2"
namespace = kubernetes_namespace.dnscat2.metadata[0].name
labels = {
app = "dnscat2"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "dnscat2"
}
}
template {
metadata {
labels = {
app = "dnscat2"
}
}
spec {
container {
image = "arno0x0x/dnscat2"
name = "dnscat2"
stdin = true
tty = true
port {
name = "dns"
container_port = 53
protocol = "UDP"
}
env {
name = "DOMAIN_NAME"
value = "rp.viktorbarzin.me"
}
}
}
}
}
}
resource "kubernetes_service" "dnscat2" {
metadata {
name = "dnscat2"
namespace = kubernetes_namespace.dnscat2.metadata[0].name
labels = {
"app" = "dnscat2"
}
}
spec {
selector = {
app = "dnscat2"
}
port {
name = "dns"
protocol = "UDP"
port = 53
# target_port = 53
}
}
}

View file

@@ -1,92 +0,0 @@
resource "kubernetes_namespace" "dnscrypt" {
metadata {
name = "dnscrypt"
}
}
resource "kubernetes_config_map" "dnscrypt" {
metadata {
name = "dnscrypt-proxy-configmap"
namespace = kubernetes_namespace.dnscrypt.metadata[0].name
}
data = {
"dnscrypt-proxy.toml" = var.dnscrypt_proxy_toml
}
}
resource "kubernetes_deployment" "dnscrypt" {
metadata {
name = "dnscrypt-proxy"
namespace = kubernetes_namespace.dnscrypt.metadata[0].name
labels = {
app = "dnscrypt-proxy"
"kubernetes.io/cluster-service" = "true"
}
}
spec {
replicas = 3
selector {
match_labels = {
app = "dnscrypt-proxy"
}
}
template {
metadata {
labels = {
app = "dnscrypt-proxy"
"kubernetes.io/cluster-service" = "true"
}
}
spec {
container {
image = "gists/dnscrypt-proxy:latest"
name = "dnscrypt-proxy"
image_pull_policy = "IfNotPresent"
port {
container_port = 53
protocol = "UDP"
}
volume_mount {
name = "config"
mount_path = "/etc/dnscrypt-proxy/"
}
}
volume {
name = "config"
config_map {
name = "dnscrypt-proxy-configmap"
items {
key = "dnscrypt-proxy.toml"
path = "dnscrypt-proxy.toml"
}
}
}
}
}
}
}
resource "kubernetes_service" "dnscrypt" {
metadata {
name = "dnscrypt-proxy"
namespace = kubernetes_namespace.dnscrypt.metadata[0].name
labels = {
"app" = "dnscrypt-proxy"
}
annotations = {
"metallb.universe.tf/allow-shared-ip" = "shared"
}
}
spec {
type = "LoadBalancer"
selector = {
app = "dnscrypt-proxy"
}
port {
name = "dns"
protocol = "UDP"
port = "5353"
target_port = "53"
}
}
}

View file

@@ -1,23 +0,0 @@
variable "namespace" {}
variable "password" {}
variable "dockerhub_creds_secret_name" {
default = "dockerhub-creds"
}
variable "username" {
default = "viktorbarzin"
}
# DO NOT USE until able to store `stringData`
resource "kubernetes_secret" "dockerhub_creds" {
metadata {
name = var.dockerhub_creds_secret_name
namespace = var.namespace
}
# data is additionally base64 encoded; no stringData support yet :/ https://github.com/hashicorp/terraform-provider-kubernetes/issues/901
data = {
"username" = var.username
"password" = var.password
}
type = "kubernetes.io/basic-auth"
}
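Given that the password passes through plain `data` here, a consuming stack would typically at least mark the input as sensitive so it stays out of plan output; a minimal sketch, assuming Terraform >= 0.14, with the variable name and module path hypothetical:

variable "dockerhub_password" {
  type      = string
  sensitive = true # hidden from plan/apply output; still stored in state
}

module "dockerhub_secret" {
  source    = "../dockerhub_secret" # hypothetical path to this module
  namespace = "default"
  password  = var.dockerhub_password
}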

View file

@@ -1,315 +0,0 @@
variable "tls_secret_name" {}
variable "prod_graphql_endpoint" {
default = "https://finance.viktorbarzin.me/graphql"
}
variable "graphql_api_secret" {}
variable "db_connection_string" {
}
variable "currency_converter_api_key" {}
variable "gocardless_secret_key" {}
variable "gocardless_secret_id" {}
resource "kubernetes_namespace" "finance_app" {
metadata {
name = "finance-app"
# TLS MiTM fails connecting to auth0
# labels = {
# "istio-injection" : "enabled"
# }
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.finance_app.metadata[0].name
tls_secret_name = var.tls_secret_name
}
# resource "kubernetes_persistent_volume" "finance_app_pv" {
# metadata {
# name = "finance-app-iscsi-pv"
# }
# spec {
# capacity = {
# "storage" = "5G"
# }
# access_modes = ["ReadWriteOnce"]
# persistent_volume_source {
# iscsi {
# target_portal = "iscsi.viktorbarzin.lan:3260"
# iqn = "iqn.2020-12.lan.viktorbarzin:storage:finance-app"
# lun = 0
# fs_type = "ext4"
# }
# }
# }
# }
# resource "kubernetes_persistent_volume_claim" "finance_app_pvc" {
# metadata {
# name = "finance-iscsi-pvc"
# namespace = kubernetes_namespace.finance_app.metadata[0].name
# }
# spec {
# access_modes = ["ReadWriteOnce"]
# resources {
# requests = {
# "storage" = "5Gi"
# }
# }
# }
# }
resource "kubernetes_deployment" "finance_app" {
metadata {
name = "finance-app"
namespace = kubernetes_namespace.finance_app.metadata[0].name
labels = {
app = "finance-app"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = 5000
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
selector {
match_labels = {
app = "finance-app"
}
}
template {
metadata {
labels = {
app = "finance-app"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = 5000
}
}
spec {
container {
image = "viktorbarzin/finance-app:latest"
name = "finance-app"
image_pull_policy = "Always"
# resources {
# limits = {
# cpu = "1"
# memory = "2Gi"
# }
# }
env {
name = "ENVIRONMENT"
value = "prod"
}
env {
name = "DB_CONNECTION_STRING"
value = var.db_connection_string
}
env {
name = "GRAPHQL_API_SECRET"
value = var.graphql_api_secret
}
env {
name = "ENABLE_SCHEDULER"
value = 1
}
env {
name = "DEBUG_METRICS"
value = 1
}
env {
name = "ML_MODEL_PATH"
value = "/data/ml_categorizer.pkl"
}
env {
name = "LABEL_ENCODER_PATH"
value = "/data/label_encoder_categorizer.pkl"
}
env {
name = "VECTORIZER_PATH"
value = "/data/vectorizer_categorizer.pkl"
}
env {
name = "CURRENCY_CONVERTER_API_KEY"
value = var.currency_converter_api_key
}
env {
name = "GOCARDLESS_SECRET_ID"
value = var.gocardless_secret_id
}
env {
name = "GOCARDLESS_SECRET_KEY"
value = var.gocardless_secret_key
}
# volume_mount {
# name = "data"
# mount_path = "/data"
# # sub_path = ""
# }
}
# volume {
# name = "data"
# iscsi {
# target_portal = "iscsi.viktorbarzin.me:3260"
# fs_type = "ext4"
# iqn = "iqn.2020-12.lan.viktorbarzin:storage:finance-app"
# lun = 0
# read_only = false
# }
# }
}
}
}
}
resource "kubernetes_deployment" "finance_app_frontend" {
metadata {
name = "finance-app-frontend"
namespace = kubernetes_namespace.finance_app.metadata[0].name
labels = {
app = "finance-app-frontend"
}
}
spec {
replicas = 1
strategy {
type = "RollingUpdate"
}
selector {
match_labels = {
app = "finance-app-frontend"
}
}
template {
metadata {
labels = {
app = "finance-app-frontend"
}
}
spec {
container {
image = "viktorbarzin/finance-app-frontend:latest"
name = "finance-app-frontend"
image_pull_policy = "Always"
}
}
}
}
}
resource "kubernetes_service" "finance_app" {
metadata {
name = "finance-app"
namespace = kubernetes_namespace.finance_app.metadata[0].name
labels = {
app = "finance-app"
}
}
spec {
selector = {
app = "finance-app"
}
port {
name = "http"
port = "5000"
}
}
}
resource "kubernetes_service" "finance_app_frontend" {
metadata {
name = "finance-app-frontend"
namespace = kubernetes_namespace.finance_app.metadata[0].name
labels = {
app = "finance-app-frontend"
}
}
spec {
selector = {
app = "finance-app-frontend"
}
port {
name = "http"
port = "3000"
}
}
}
resource "kubernetes_ingress_v1" "finance_app" {
metadata {
name = "finance-app"
namespace = kubernetes_namespace.finance_app.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "traefik-rate-limit@kubernetescrd,traefik-csp-headers@kubernetescrd,traefik-crowdsec@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["finance.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "finance.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "finance-app-frontend"
port {
number = 3000
}
}
}
}
}
}
rule {
host = "finance.viktorbarzin.me"
http {
path {
path = "/graphql"
backend {
service {
name = "finance-app"
port {
number = 5000
}
}
}
}
}
}
rule {
host = "finance.viktorbarzin.me"
http {
path {
path = "/webhook"
backend {
service {
name = "finance-app"
port {
number = 5000
}
}
}
}
}
}
}
}

View file

@@ -1,74 +0,0 @@
# hostname: home-assistant
ingress:
main:
# -- Enables or disables the ingress
enabled: true
# -- Make this the primary ingress (used in probes, notes, etc...).
# If there is more than 1 ingress, make sure that only 1 ingress is marked as primary.
primary: true
# -- Override the name suffix that is used for this ingress.
nameOverride:
# -- Provide additional annotations which may be required.
annotations: #{}
kubernetes.io/ingress.class : "nginx"
nginx.ingress.kubernetes.io/force-ssl-redirect : "true"
nginx.ingress.kubernetes.io/auth-tls-verify-client : "on"
nginx.ingress.kubernetes.io/auth-tls-secret : ${client_certificate_secret_name}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- Provide additional labels which may be required.
labels: {}
# -- Set the ingressClass that is used for this ingress.
# Requires Kubernetes >=1.19
ingressClassName: # "nginx"
## Configure the hosts for the ingress
hosts:
- # -- Host address. Helm template can be passed.
host: home-assistant.viktorbarzin.me
## Configure the paths for the host
paths:
- # -- Path. Helm template can be passed.
path: /
# -- Ignored if not kubeVersion >= 1.14-0
pathType: Prefix
service:
# -- Overrides the service name reference for this path
name: home-assistant
# -- Overrides the service port reference for this path
port: 8123
# -- Configure TLS for the ingress. Both secretName and hosts can process a Helm template.
tls: #[]
- secretName: ${tls_secret_name}
hosts:
- home-assistant.viktorbarzin.me
# -- Configure persistence for the chart here.
# Additional items can be added by adding a dictionary key similar to the 'config' key.
# [[ref]](http://docs.k8s-at-home.com/our-helm-charts/common-library-storage)
# @default -- See below
persistence:
# -- Default persistence for configuration files.
# @default -- See below
config:
# -- Enables or disables the persistence item
enabled: false
# -- Sets the persistence type
# Valid options are pvc, emptyDir, hostPath, secret, configMap or custom
type: configMap
name: home-assistant-configmap
# -- Where to mount the volume in the main container.
# Defaults to `/<name_of_the_volume>`,
# setting to '-' creates the volume but disables the volumeMount.
mountPath: /config
# -- Specify if the volume should be mounted read-only.
readOnly: true

View file

@@ -1,238 +0,0 @@
variable "tls_secret_name" {}
variable "client_certificate_secret_name" {}
variable "configuration_yaml" {}
resource "kubernetes_namespace" "home_assistant" {
metadata {
name = "home-assistant"
}
}
resource "kubernetes_config_map" "home_assistant_config_map" {
metadata {
name = "home-assistant-configmap"
namespace = kubernetes_namespace.home_assistant.metadata[0].name
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
# "db.viktorbarzin.lan" = var.db_viktorbarzin_lan
# "db.viktorbarzin.me" = format("%s%s", var.db_viktorbarzin_me, file("${path.module}/extra/viktorbarzin.me"))
# "db.181.191.213.in-addr.arpa" = var.db_ptr
"configuration.yaml" = var.configuration_yaml
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.home_assistant.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "helm_release" "home_assistant" {
namespace = kubernetes_namespace.home_assistant.metadata[0].name
create_namespace = true
name = "home-assistant"
repository = "https://k8s-at-home.com/charts/"
chart = "home-assistant"
values = [templatefile("${path.module}/home_assistant_chart_values.tpl", { tls_secret_name = var.tls_secret_name, client_certificate_secret_name = var.client_certificate_secret_name })]
}
resource "kubernetes_deployment" "home_assistant" {
metadata {
name = "home-assistant"
namespace = kubernetes_namespace.home_assistant.metadata[0].name
labels = {
"app.kubernetes.io/instance" = "home-assistant"
"app.kubernetes.io/name" = "home-assistant"
"app.kubernetes.io/version" = "2022.5.4"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app.kubernetes.io/instance" = "home-assistant"
"app.kubernetes.io/name" = "home-assistant"
}
}
template {
metadata {
labels = {
"app.kubernetes.io/instance" = "home-assistant"
"app.kubernetes.io/name" = "home-assistant"
}
}
spec {
container {
name = "home-assistant"
# image = "ghcr.io/home-assistant/home-assistant:2022.5.4"
image = "ghcr.io/home-assistant/home-assistant:2022.5.5"
# image = "ghcr.io/home-assistant/home-assistant"
port {
name = "http"
container_port = 8123
protocol = "TCP"
}
env {
name = "TZ"
value = "UTC+3"
}
volume_mount {
name = "configuration"
mount_path = "/config"
# sub_path = "hackmd"
}
liveness_probe {
tcp_socket {
port = "8123"
}
timeout_seconds = 1
period_seconds = 10
success_threshold = 1
failure_threshold = 3
}
readiness_probe {
tcp_socket {
port = "8123"
}
timeout_seconds = 1
period_seconds = 10
success_threshold = 1
failure_threshold = 3
}
startup_probe {
tcp_socket {
port = "8123"
}
timeout_seconds = 1
period_seconds = 5
success_threshold = 1
failure_threshold = 30
}
termination_message_path = "/dev/termination-log"
image_pull_policy = "IfNotPresent"
}
volume {
name = "configuration"
iscsi {
target_portal = "iscsi.viktorbarzin.lan:3260"
fs_type = "ext4"
iqn = "iqn.2020-12.lan.viktorbarzin:storage:home-assistant"
lun = 0
read_only = false
}
}
restart_policy = "Always"
termination_grace_period_seconds = 30
dns_policy = "ClusterFirst"
service_account_name = "default"
}
}
strategy {
type = "Recreate"
}
revision_history_limit = 3
}
}
resource "kubernetes_service" "home_assistant" {
metadata {
name = "home-assistant"
namespace = kubernetes_namespace.home_assistant.metadata[0].name
labels = {
"app.kubernetes.io/instance" = "home-assistant"
"app.kubernetes.io/managed-by" = "Helm"
"app.kubernetes.io/name" = "home-assistant"
"app.kubernetes.io/version" = "2022.5.4"
"helm.sh/chart" = "home-assistant-13.2.0"
}
annotations = {
"meta.helm.sh/release-name" = "home-assistant"
"meta.helm.sh/release-namespace" = "home-assistant"
}
}
spec {
port {
name = "http"
protocol = "TCP"
port = 8123
target_port = "http"
}
selector = {
"app.kubernetes.io/instance" = "home-assistant"
"app.kubernetes.io/name" = "home-assistant"
}
# cluster_ip = "10.102.20.150"
type = "ClusterIP"
session_affinity = "None"
}
}
resource "kubernetes_ingress_v1" "home-assistant-ui" {
metadata {
name = "home-assistant-ui-ingress"
namespace = kubernetes_namespace.home_assistant.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "traefik-rate-limit@kubernetescrd,traefik-csp-headers@kubernetescrd,traefik-crowdsec@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
"traefik.ingress.kubernetes.io/router.tls.options" = "traefik-mtls@kubernetescrd"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["home-assistant.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "home-assistant.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "home-assistant"
port {
number = 8123
}
}
}
}
}
}
}
}

View file

@@ -1,12 +0,0 @@
#!/bin/bash
user="user"
pass="pass"
# Get the power supply's line input voltage (PSU slot 2)
curl -s -k -u $user:$pass -H"Content-type: application/json" -X GET https://idrac/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2 |jq .LineInputVoltage
# Power off
curl -s -k -u $user:$pass -X POST -d '{"Action": "Reset", "ResetType": "GracefulShutdown"}' -H"Content-type: application/json" https://idrac/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset
# Power on
curl -s -k -u $user:$pass -X POST -d '{"Action": "Reset", "ResetType": "On"}' -H"Content-type: application/json" https://idrac/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset

View file

@@ -1,40 +0,0 @@
global:
# ImagePullSecrets for control plane ServiceAccount, list of secrets in the same namespace
# to use for pulling any images in pods that reference this ServiceAccount.
# Must be set for any cluster configured with private docker registry.
imagePullSecrets: []
# Used to locate istiod.
istioNamespace: istio-system
istiod:
enableAnalysis: false
configValidation: true
externalIstiod: false
remotePilotAddress: ""
# Platform where Istio is deployed. Possible values are: "openshift", "gcp".
# An empty value means it is a vanilla Kubernetes distribution, therefore no special
# treatment will be considered.
platform: ""
# Setup how istiod Service is configured. See https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
# This is intended only for use with external istiod.
ipFamilyPolicy: ""
ipFamilies: []
base:
# Used for helm2 to add the CRDs to templates.
enableCRDTemplates: false
# Validation webhook configuration url
# For example: https://$remotePilotAddress:15017/validate
validationURL: ""
# For istioctl usage to disable istio config crds in base
enableIstioConfigCRDs: true
defaultRevision: "default"

View file

@@ -1,520 +0,0 @@
#.Values.pilot for discovery and mesh wide config
## Discovery Settings
pilot:
autoscaleEnabled: true
autoscaleMin: 1
autoscaleMax: 5
autoscaleBehavior: {}
replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
hub: ""
tag: ""
variant: ""
# Can be a full hub/image:tag
image: pilot
traceSampling: 1.0
# Resources for a small pilot install
resources:
requests:
cpu: 500m
memory: 2048Mi
# Set to `type: RuntimeDefault` to use the default profile if available.
seccompProfile: {}
# Additional container arguments
extraContainerArgs: []
env: {}
cpu:
targetAverageUtilization: 80
# Additional volumeMounts to the istiod container
volumeMounts: []
# Additional volumes to the istiod pod
volumes: []
nodeSelector: {}
podAnnotations: {}
serviceAnnotations: {}
topologySpreadConstraints: []
# You can use jwksResolverExtraRootCA to provide a root certificate
# in PEM format. This will then be trusted by pilot when resolving
# JWKS URIs.
jwksResolverExtraRootCA: ""
# This is used to set the source of configuration for
# the associated address in configSource, if nothing is specified
# the default MCP is assumed.
configSource:
subscribedResources: []
plugins: []
# The following is used to limit how long a sidecar can be connected
# to a pilot. It balances out load across pilot instances at the cost of
# increasing system churn.
keepaliveMaxServerConnectionAge: 30m
# Additional labels to apply to the deployment.
deploymentLabels: {}
## Mesh config settings
# Install the mesh config map, generated from values.yaml.
# If false, pilot will use default values (by default) or user-supplied values.
configMap: true
# Additional labels to apply on the pod level for monitoring and logging configuration.
podLabels: {}
# Setup how istiod Service is configured. See https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
ipFamilyPolicy: ""
ipFamilies: []
sidecarInjectorWebhook:
# You can use the field called alwaysInjectSelector and neverInjectSelector which will always inject the sidecar or
# always skip the injection on pods that match that label selector, regardless of the global policy.
# See https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#more-control-adding-exceptions
neverInjectSelector: []
alwaysInjectSelector: []
# injectedAnnotations are additional annotations that will be added to the pod spec after injection
# This is primarily to support PSP annotations. For example, if you defined a PSP with the annotations:
#
# annotations:
# apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
# apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
#
# The PSP controller would add corresponding annotations to the pod spec for each container. However, this happens before
# the inject adds additional containers, so we must specify them explicitly here. With the above example, we could specify:
# injectedAnnotations:
# container.apparmor.security.beta.kubernetes.io/istio-init: runtime/default
# container.apparmor.security.beta.kubernetes.io/istio-proxy: runtime/default
injectedAnnotations: {}
# This enables injection of sidecar in all namespaces,
# with the exception of namespaces with "istio-injection:disabled" annotation
# Only one environment should have this enabled.
enableNamespacesByDefault: false
# Mutations that occur after the sidecar injector are not handled by default, as the Istio sidecar injector is only run
# once. For example, an OPA sidecar injected after the Istio sidecar will not have its liveness/readiness probes rewritten.
# Setting this to `IfNeeded` will result in the sidecar injector being run again if additional mutations occur.
reinvocationPolicy: Never
rewriteAppHTTPProbe: true
# Templates defines a set of custom injection templates that can be used. For example, defining:
#
# templates:
# hello: |
# metadata:
# labels:
# hello: world
#
# Then starting a pod with the `inject.istio.io/templates: hello` annotation, will result in the pod
# being injected with the hello=world labels.
# This is intended for advanced configuration only; most users should use the built in template
templates: {}
# Default templates specifies a set of default templates that are used in sidecar injection.
# By default, a template `sidecar` is always provided, which contains the template of default sidecar.
# To inject other additional templates, define it using the `templates` option, and add it to
# the default templates list.
# For example:
#
# templates:
# hello: |
# metadata:
# labels:
# hello: world
#
# defaultTemplates: ["sidecar", "hello"]
defaultTemplates: []
istiodRemote:
# Sidecar injector mutating webhook configuration clientConfig.url value.
# For example: https://$remotePilotAddress:15017/inject
# The host should not refer to a service running in the cluster; use a service reference by specifying
# the clientConfig.service field instead.
injectionURL: ""
# Sidecar injector mutating webhook configuration path value for the clientConfig.service field.
# Override to pass env variables, for example: /inject/cluster/remote/net/network2
injectionPath: "/inject"
telemetry:
enabled: true
v2:
# For Null VM case now.
# This also enables metadata exchange.
enabled: true
metadataExchange:
# Indicates whether to enable WebAssembly runtime for metadata exchange filter.
wasmEnabled: false
# Indicate if prometheus stats filter is enabled or not
prometheus:
enabled: true
# Indicates whether to enable WebAssembly runtime for stats filter.
wasmEnabled: false
# overrides stats EnvoyFilter configuration.
configOverride:
gateway: {}
inboundSidecar: {}
outboundSidecar: {}
# stackdriver filter settings.
stackdriver:
enabled: false
logging: false
monitoring: false
topology: false # deprecated. setting this to true will have no effect, as this option is no longer supported.
disableOutbound: false
# configOverride parts give you the ability to override the low level configuration params passed to envoy filter.
configOverride: {}
# e.g.
# disable_server_access_logging: false
# disable_host_header_fallback: true
# Access Log Policy Filter Settings. This enables filtering of access logs from stackdriver.
accessLogPolicy:
enabled: false
# To reduce the number of successful logs, default log window duration is
# set to 12 hours.
logWindowDuration: "43200s"
# Revision is set as 'version' label and part of the resource names when installing multiple control planes.
revision: ""
# Revision tags are aliases to Istio control plane revisions
revisionTags: []
# For Helm compatibility.
ownerName: ""
# meshConfig defines runtime configuration of components, including Istiod and istio-agent behavior
# See https://istio.io/docs/reference/config/istio.mesh.v1alpha1/ for all available options
meshConfig:
enablePrometheusMerge: true
global:
# Used to locate istiod.
istioNamespace: istio-system
# List of cert-signers to allow "approve" action in the istio cluster role
#
# certSigners:
# - clusterissuers.cert-manager.io/istio-ca
certSigners: []
# enable pod disruption budget for the control plane, which is used to
# ensure Istio control plane components are gradually upgraded or recovered.
defaultPodDisruptionBudget:
enabled: true
# The values aren't mutable due to a current PodDisruptionBudget limitation
# minAvailable: 1
# A minimal set of requested resources to applied to all deployments so that
# Horizontal Pod Autoscaler will be able to function (if set).
# Each component can overwrite these default values by adding its own resources
# block in the relevant section below and setting the desired resources values.
defaultResources:
requests:
cpu: 10m
# memory: 128Mi
# limits:
# cpu: 100m
# memory: 128Mi
# Default hub for Istio images.
# Releases are published to docker hub under 'istio' project.
# Dev builds from prow are on gcr.io
hub: docker.io/istio
# Default tag for Istio images.
tag: 1.20.1
# Variant of the image to use.
# Currently supported are: [debug, distroless]
variant: ""
# Specify image pull policy if default behavior isn't desired.
# Default behavior: latest images will be Always else IfNotPresent.
imagePullPolicy: ""
# ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace
# to use for pulling any images in pods that reference this ServiceAccount.
# For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)
# ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.
# Must be set for any cluster configured with private docker registry.
imagePullSecrets: []
# - private-registry-key
# Enabled by default in master for maximising testing.
istiod:
enableAnalysis: false
# To output all istio components logs in json format by adding --log_as_json argument to each container argument
logAsJson: false
# Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>
# The control plane has different scopes depending on component, but can configure default log level across all components
# If empty, default scope and level will be used as configured in code
logging:
level: "default:info"
omitSidecarInjectorConfigMap: false
# Whether to restrict the applications namespace the controller manages;
# If not set, controller watches all namespaces
oneNamespace: false
# Configure whether Operator manages webhook configurations. The current behavior
# of Istiod is to manage its own webhook configurations.
# When this option is set as true, Istio Operator, instead of webhooks, manages the
# webhook configurations. When this option is set as false, webhooks manage their
# own webhook configurations.
operatorManageWebhooks: false
# Custom DNS config for the pod to resolve names of services in other
# clusters. Use this to add additional search domains, and other settings.
# see
# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config
# This does not apply to gateway pods as they typically need a different
# set of DNS settings than the normal application pods (e.g., in
# multicluster scenarios).
# NOTE: If using templates, follow the pattern in the commented example below.
#podDNSSearchNamespaces:
#- global
#- "{{ valueOrDefault .DeploymentMeta.Namespace \"default\" }}.global"
# Kubernetes >=v1.11.0 will create two PriorityClass, including system-cluster-critical and
# system-node-critical, it is better to configure this in order to make sure your Istio pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""
proxy:
image: proxyv2
# This controls the 'policy' in the sidecar injector.
autoInject: enabled
# CAUTION: It is important to ensure that all Istio helm charts specify the same clusterDomain value
# cluster domain. Default value is "cluster.local".
clusterDomain: "cluster.local"
# Per Component log level for proxy, applies to gateways and sidecars. If a component level is
# not set, then the global "logLevel" will be used.
componentLogLevel: "misc:error"
# If set, newly injected sidecars will have core dumps enabled.
enableCoreDump: false
# istio ingress capture allowlist
# examples:
# Redirect only selected ports: --includeInboundPorts="80,8080"
excludeInboundPorts: ""
includeInboundPorts: "*"
# istio egress capture allowlist
# https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly
# example: includeIPRanges: "172.30.0.0/16,172.20.0.0/16"
# would only capture egress traffic on those two IP Ranges, all other outbound traffic would
# be allowed by the sidecar
includeIPRanges: "*"
excludeIPRanges: ""
includeOutboundPorts: ""
excludeOutboundPorts: ""
# Log level for proxy, applies to gateways and sidecars.
# Expected values are: trace|debug|info|warning|error|critical|off
logLevel: warning
#If set to true, istio-proxy container will have privileged securityContext
privileged: false
# The number of successive failed probes before indicating readiness failure.
readinessFailureThreshold: 4
# The initial delay for readiness probes in seconds.
readinessInitialDelaySeconds: 0
# The period between readiness probes.
readinessPeriodSeconds: 15
# Enables or disables a startup probe.
# For optimal startup times, changing this should be tied to the readiness probe values.
#
# If the probe is enabled, it is recommended to have delay=0s,period=15s,failureThreshold=4.
# This ensures the pod is marked ready immediately after the startup probe passes (which has a 1s poll interval),
# and doesn't spam the readiness endpoint too much
#
# If the probe is disabled, it is recommended to have delay=1s,period=2s,failureThreshold=30.
# This ensures the startup is reasonably fast (polling every 2s). A 1s delay is used since the startup is often not ready instantly.
startupProbe:
enabled: true
failureThreshold: 600 # 10 minutes
# Resources for the sidecar.
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
# Default port for Pilot agent health checks. A value of 0 will disable health checking.
statusPort: 15020
# Specify which tracer to use. One of: zipkin, lightstep, datadog, stackdriver.
# If using stackdriver tracer outside GCP, set env GOOGLE_APPLICATION_CREDENTIALS to the GCP credential file.
tracer: "zipkin"
proxy_init:
# Base name for the proxy_init container, used to configure iptables.
image: proxyv2
# configure remote pilot and istiod service and endpoint
remotePilotAddress: ""
##############################################################################################
# The following values are found in other charts. To effectively modify these values, make #
# make sure they are consistent across your Istio helm charts #
##############################################################################################
# The customized CA address to retrieve certificates for the pods in the cluster.
# CSR clients such as the Istio Agent and ingress gateways can use this to specify the CA endpoint.
# If not set explicitly, default to the Istio discovery address.
caAddress: ""
# Configure a remote cluster data plane controlled by an external istiod.
# When set to true, istiod is not deployed locally and only a subset of the other
# discovery charts are enabled.
externalIstiod: false
# Configure a remote cluster as the config cluster for an external istiod.
configCluster: false
# Configure the policy for validating JWT.
# Currently, two options are supported: "third-party-jwt" and "first-party-jwt".
jwtPolicy: "third-party-jwt"
# Mesh ID means Mesh Identifier. It should be unique within the scope where
# meshes will interact with each other, but it is not required to be
# globally/universally unique. For example, if any of the following are true,
# then two meshes must have different Mesh IDs:
# - Meshes will have their telemetry aggregated in one place
# - Meshes will be federated together
# - Policy will be written referencing one mesh from the other
#
# If an administrator expects that any of these conditions may become true in
# the future, they should ensure their meshes have different Mesh IDs
# assigned.
#
# Within a multicluster mesh, each cluster must be (manually or auto)
# configured to have the same Mesh ID value. If an existing cluster 'joins' a
# multicluster mesh, it will need to be migrated to the new mesh ID. Details
# of migration TBD, and it may be a disruptive operation to change the Mesh
# ID post-install.
#
# If the mesh admin does not specify a value, Istio will use the value of the
# mesh's Trust Domain. The best practice is to select a proper Trust Domain
# value.
meshID: ""
# Configure the mesh networks to be used by the Split Horizon EDS.
#
# The following example defines two networks with different endpoints association methods.
# For `network1` all endpoints that their IP belongs to the provided CIDR range will be
# mapped to network1. The gateway for this network example is specified by its public IP
# address and port.
# The second network, `network2`, in this example is defined differently with all endpoints
# retrieved through the specified Multi-Cluster registry being mapped to network2. The
# gateway is also defined differently with the name of the gateway service on the remote
# cluster. The public IP for the gateway will be determined from that remote service (only
# LoadBalancer gateway service type is currently supported, for a NodePort type gateway service,
# it still needs to be configured manually).
#
# meshNetworks:
# network1:
# endpoints:
# - fromCidr: "192.168.0.1/24"
# gateways:
# - address: 1.1.1.1
# port: 80
# network2:
# endpoints:
# - fromRegistry: reg1
# gateways:
# - registryServiceName: istio-ingressgateway.istio-system.svc.cluster.local
# port: 443
#
meshNetworks: {}
# Use the user-specified, secret volume mounted key and certs for Pilot and workloads.
mountMtlsCerts: false
multiCluster:
# Set to true to connect two kubernetes clusters via their respective
# ingressgateway services when pods in each cluster cannot directly
# talk to one another. All clusters should be using Istio mTLS and must
# have a shared root CA for this model to work.
enabled: false
# Should be set to the name of the cluster this installation will run in. This is required for sidecar injection
# to properly label proxies
clusterName: ""
# Network defines the network this cluster belong to. This name
# corresponds to the networks in the map of mesh networks.
network: ""
# Configure the certificate provider for control plane communication.
# Currently, two providers are supported: "kubernetes" and "istiod".
# As some platforms may not have kubernetes signing APIs,
# Istiod is the default
pilotCertProvider: istiod
sds:
# The JWT token for SDS and the aud field of such JWT. See RFC 7519, section 4.1.3.
# When a CSR is sent from Istio Agent to the CA (e.g. Istiod), this aud is to make sure the
# JWT is intended for the CA.
token:
aud: istio-ca
sts:
# The service port used by Security Token Service (STS) server to handle token exchange requests.
# Setting this port to a non-zero value enables STS server.
servicePort: 0
# The name of the CA for workload certificates.
# For example, when caName=GkeWorkloadCertificate, GKE workload certificates
# will be used as the certificates for workloads.
# The default value is "" and when caName="", the CA will be configured by other
# mechanisms (e.g., environmental variable CA_PROVIDER).
caName: ""
# whether to use autoscaling/v2 template for HPA settings
# for internal usage only, not to be configured by users.
autoscalingv2API: true
base:
# For istioctl usage to disable istio config crds in base
enableIstioConfigCRDs: true
# If enabled, gateway-api types will be validated using the standard upstream validation logic.
# This is an alternative to deploying the standalone validation server the project provides.
# This is disabled by default, as the cluster may already have a validation server; while technically
# it works to have multiple redundant validations, this adds complexity and operational risks.
# Users should consider enabling this if they want full gateway-api validation but don't have other validation servers.
validateGateway: false
# keep in sync with settings used when installing the Istio CNI chart
istio_cni:
enabled: false
chained: true

View file

@@ -1,122 +0,0 @@
nameOverride: ""
fullnameOverride: ""
image: # see: https://quay.io/repository/kiali/kiali-operator?tab=tags
repo: quay.io/kiali/kiali-operator # quay.io/kiali/kiali-operator
tag: v1.78.0 # version string like v1.39.0 or a digest hash
digest: "" # use "sha256" if tag is a sha256 hash (do NOT prefix this value with a "@")
pullPolicy: Always
pullSecrets: []
# Deployment options for the operator pod.
nodeSelector: {}
podAnnotations: {}
podLabels: {}
env: []
tolerations: []
resources:
requests:
cpu: "10m"
memory: "64Mi"
affinity: {}
replicaCount: 1
priorityClassName: ""
securityContext: {}
# metrics.enabled: set to true if you want Prometheus to collect metrics from the operator
metrics:
enabled: true
# debug.enabled: when true the full ansible logs are dumped after each reconciliation run
# debug.verbosity: defines the amount of details the operator will log (higher numbers are more noisy)
# debug.enableProfiler: when true (regardless of debug.enabled), timings for the most expensive tasks will be logged after each reconciliation loop
debug:
enabled: true
verbosity: "1"
enableProfiler: false
# Defines where the operator will look for Kiali CR resources. "" means "all namespaces".
watchNamespace: ""
# Set to true if you want the operator to be able to create cluster roles. This is necessary
# if you want to support Kiali CRs with spec.deployment.accessible_namespaces of '**'.
# Setting this to "true" requires allowAllAccessibleNamespaces to be "true" also.
# Note that this will be overridden to "true" if cr.create is true and cr.spec.deployment.accessible_namespaces is ['**'].
clusterRoleCreator: true
# Set to a list of secrets in the cluster that the operator will be allowed to read. This is necessary if you want to
# support Kiali CRs with spec.kiali_feature_flags.certificates_information_indicators.enabled=true.
# The secrets in this list will be the only ones allowed to be specified in any Kiali CR (in the setting
# spec.kiali_feature_flags.certificates_information_indicators.secrets).
# If you set this to an empty list, the operator will not be given permission to read any additional secrets
# found in the cluster, and thus will only support a value of "false" in the Kiali CR setting
# spec.kiali_feature_flags.certificates_information_indicators.enabled.
secretReader: ["cacerts", "istio-ca-secret"]
# Set to true if you want to allow the operator to only be able to install Kiali in view-only-mode.
# The purpose for this setting is to allow you to restrict the permissions given to the operator itself.
onlyViewOnlyMode: false
# allowAdHocKialiNamespace tells the operator to allow a user to be able to install a Kiali CR in one namespace but
# be able to install Kiali in another namespace. In other words, it will allow the Kiali CR spec.deployment.namespace
# to be something other than the namespace where the CR is installed. You may want to disable this if you are
# running in a multi-tenant scenario in which you only want a user to be able to install Kiali in the same namespace
# where the user has permissions to install a Kiali CR.
allowAdHocKialiNamespace: true
# allowAdHocKialiImage tells the operator to allow a user to be able to install a custom Kiali image as opposed
# to the image the operator will install by default. In other words, it will allow the
# Kiali CR spec.deployment.image_name and spec.deployment.image_version to be configured by the user.
# You may want to disable this if you do not want users to install their own Kiali images.
allowAdHocKialiImage: false
# allowAdHocOSSMConsoleImage tells the operator to allow a user to be able to install a custom OSSMC image as opposed
# to the image the operator will install by default. In other words, it will allow the
# OSSMConsole CR spec.deployment.imageName and spec.deployment.imageVersion to be configured by the user.
# You may want to disable this if you do not want users to install their own OSSMC images.
# This is only applicable when running on OpenShift.
allowAdHocOSSMConsoleImage: false
# allowSecurityContextOverride tells the operator to allow a user to be able to fully override the Kiali
# container securityContext. If this is false, certain securityContext settings must exist on the Kiali
# container and any attempt to override them will be ignored.
allowSecurityContextOverride: false
# allowAllAccessibleNamespaces tells the operator to allow a user to be able to configure Kiali
# to access all namespaces in the cluster via spec.deployment.accessible_namespaces=['**'].
# If this is false, the user must specify an explicit list of namespaces in the Kiali CR.
# Setting this to "true" requires clusterRoleCreator to be "true" also.
# Note that this will be overridden to "true" if cr.create is true and cr.spec.deployment.accessible_namespaces is ['**'].
allowAllAccessibleNamespaces: true
# accessibleNamespacesLabel restricts the namespaces that a user can add to the Kiali CR spec.deployment.accessible_namespaces.
# This value is either an empty string (which disables this feature) or a label name with an optional label value
# (e.g. "mylabel" or "mylabel=myvalue"). Only namespaces that have that label will be permitted in
# spec.deployment.accessible_namespaces. Any namespace not labeled properly but specified in accessible_namespaces will cause
# the operator to abort the Kiali installation.
# If just a label name (but no label value) is specified, the label value the operator will look for is the value of
# the Kiali CR's spec.istio_namespace. In other words, the operator will look for the named label whose value must be the name
# of the Istio control plane namespace (which is typically, but not necessarily, "istio-system").
accessibleNamespacesLabel: ""
# For what a Kiali CR spec can look like, see:
# https://github.com/kiali/kiali-operator/blob/master/deploy/kiali/kiali_cr.yaml
cr:
create: false
name: kiali
# If you elect to create a Kiali CR (--set cr.create=true)
# and the operator is watching all namespaces (--set watchNamespace="")
# then this is the namespace where the CR will be created (the default will be the operator namespace).
namespace: ""
# Annotations to place in the Kiali CR metadata.
annotations: {}
spec:
deployment:
accessible_namespaces:
- "**"
external_services:
prometheus:
# Prometheus is reachable via the "prometheus-server" service in the "monitoring" namespace
url: "http://prometheus-server.monitoring:80/"

View file

@@ -1,116 +0,0 @@
variable "tls_secret_name" {}
resource "kubernetes_namespace" "istio" {
metadata {
name = "istio-system"
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.istio.metadata[0].name
tls_secret_name = var.tls_secret_name
}
# to delete all CRDs: kubectl get crd -oname | grep --color=never 'istio.io' | xargs kubectl delete
resource "helm_release" "istio-base" {
namespace = kubernetes_namespace.istio.metadata[0].name
create_namespace = false
name = "istio-base"
atomic = true
repository = "https://istio-release.storage.googleapis.com/charts"
chart = "base"
depends_on = [kubernetes_namespace.istio]
}
resource "helm_release" "istiod" {
namespace = kubernetes_namespace.istio.metadata[0].name
create_namespace = false
name = "istiod"
atomic = true
repository = "https://istio-release.storage.googleapis.com/charts"
chart = "istiod"
depends_on = [kubernetes_namespace.istio]
}
resource "helm_release" "istio-gateway" {
namespace = kubernetes_namespace.istio.metadata[0].name
create_namespace = false
name = "istio-gateway"
atomic = true
repository = "https://istio-release.storage.googleapis.com/charts"
chart = "gateway"
depends_on = [kubernetes_namespace.istio]
}
# Kiali dashboard
resource "helm_release" "kiali" {
namespace = kubernetes_namespace.istio.metadata[0].name
create_namespace = false
name = "kiali"
atomic = true
repository = "https://kiali.org/helm-charts"
chart = "kiali-operator"
set {
name = "cr.create"
value = "true"
}
set {
name = "cr.namespace"
value = "istio-system"
}
values = [templatefile("${path.module}/kiali.yaml", {})]
depends_on = [kubernetes_namespace.istio]
}
resource "kubernetes_secret" "kiali-token" {
metadata {
name = "kiali-secret"
namespace = kubernetes_namespace.istio.metadata[0].name
annotations = {
"kubernetes.io/service-account.name" : "kiali-service-account"
}
}
type = "kubernetes.io/service-account-token"
}
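# The dashboard login token can be read back with e.g.:
#   kubectl -n istio-system get secret kiali-secret -o jsonpath='{.data.token}' | base64 -d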
# Gets auto-removed; revisit after finishing the power consumption analysis.
# resource "kubernetes_ingress_v1" "kiali" {
# metadata {
# name = "kiali"
# namespace = kubernetes_namespace.istio.metadata[0].name
# annotations = {
# "kubernetes.io/ingress.class" = "nginx"
# "nginx.ingress.kubernetes.io/auth-url" : "https://oauth2.viktorbarzin.me/oauth2/auth"
# "nginx.ingress.kubernetes.io/auth-signin" : "https://oauth2.viktorbarzin.me/oauth2/start?rd=/redirect/$http_host$escaped_request_uri"
# }
# }
# spec {
# tls {
# hosts = ["kiali.viktorbarzin.me"]
# secret_name = var.tls_secret_name
# }
# rule {
# host = "kiali.viktorbarzin.me"
# http {
# path {
# path = "/"
# backend {
# service {
# name = "kiali"
# port {
# number = 20001
# }
# }
# }
# }
# }
# }
# }
# }


@ -1,117 +0,0 @@
variable "tls_secret_name" {}
resource "kubernetes_namespace" "jellyfin" {
metadata {
name = "jellyfin"
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.jellyfin.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_deployment" "jellyfin" {
metadata {
name = "jellyfin"
namespace = kubernetes_namespace.jellyfin.metadata[0].name
labels = {
app = "jellyfin"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
selector {
match_labels = {
app = "jellyfin"
}
}
template {
metadata {
labels = {
app = "jellyfin"
}
}
spec {
container {
image = "jellyfin/jellyfin"
name = "jellyfin"
port {
container_port = 8096
}
volume_mount {
name = "media"
mount_path = "/media"
}
volume_mount {
name = "config"
mount_path = "/config"
}
volume_mount {
name = "cache"
mount_path = "/cache"
}
}
volume {
name = "media"
nfs {
path = "/mnt/main/jellyfin/media"
server = "10.0.10.15"
}
}
volume {
name = "config"
nfs {
path = "/mnt/main/jellyfin/config"
server = "10.0.10.15"
}
}
volume {
name = "cache"
nfs {
path = "/mnt/main/jellyfin/cache"
server = "10.0.10.15"
}
}
}
}
}
}
resource "kubernetes_service" "jellyfin" {
metadata {
name = "jellyfin"
namespace = kubernetes_namespace.jellyfin.metadata[0].name
labels = {
"app" = "jellyfin"
}
}
spec {
selector = {
app = "jellyfin"
}
port {
name = "http"
target_port = 8096
port = 80
protocol = "TCP"
}
}
}
module "ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.jellyfin.metadata[0].name
name = "jellyfin"
tls_secret_name = var.tls_secret_name
}


@ -1,9 +0,0 @@
metrics:
kafka:
enabled: true
persistence:
enabled: false
zookeeper:
persistence:
enabled: false
replicaCount: 3


@ -1,142 +0,0 @@
variable "tls_secret_name" {}
variable "client_certificate_secret_name" {}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.kafka.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "helm_release" "kafka" {
namespace = kubernetes_namespace.kafka.metadata[0].name
create_namespace = true
name = "kafka"
repository = "https://charts.bitnami.com/bitnami"
chart = "kafka"
values = [templatefile("${path.module}/kafka_chart_values.tpl", {})]
}
resource "kubernetes_deployment" "kafka-ui" {
metadata {
name = "kafka-ui"
namespace = kubernetes_namespace.kafka.metadata[0].name
labels = {
run = "kafka-ui"
}
}
spec {
replicas = 1
selector {
match_labels = {
run = "kafka-ui"
}
}
template {
metadata {
labels = {
run = "kafka-ui"
}
}
spec {
container {
image = "provectuslabs/kafka-ui:latest"
name = "kafka-ui"
resources {
limits = {
cpu = "0.5"
memory = "512Mi"
}
requests = {
cpu = "250m"
memory = "50Mi"
}
}
port {
container_port = 8080
}
env {
name = "KAFKA_CLUSTERS_0_NAME"
value = "local"
}
env {
name = "KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS"
value = "kafka:9092"
}
env {
name = "KAFKA_CLUSTERS_0_ZOOKEEPER"
value = "kafka-zookeeper:2181"
}
}
}
}
}
}
resource "kubernetes_service" "kafka-ui" {
metadata {
name = "kafka-ui"
namespace = kubernetes_namespace.kafka.metadata[0].name
labels = {
"run" = "kafka-ui"
}
# annotations = {
# "prometheus.io/scrape" = "true"
# "prometheus.io/path" = "/metrics"
# "prometheus.io/port" = "9113"
# }
}
spec {
selector = {
run = "kafka-ui"
}
port {
name = "http"
port = "80"
target_port = "8080"
}
# port {
# name = "prometheus"
# port = "9113"
# target_port = "9113"
# }
}
}
resource "kubernetes_ingress_v1" "kafka-ui" {
metadata {
name = "kafka-ui-ingress"
namespace = kubernetes_namespace.kafka.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "traefik-rate-limit@kubernetescrd,traefik-csp-headers@kubernetescrd,traefik-crowdsec@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
"traefik.ingress.kubernetes.io/router.tls.options" = "traefik-mtls@kubernetescrd"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["kafka.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "kafka.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "kafka-ui"
port {
number = 80
}
}
}
}
}
}
}
}


@ -1,73 +0,0 @@
This contains the setup for a remote machine that serves a key file used to decrypt a LUKS volume.
1. Install nginx
```
sudo apt update
sudo apt install nginx apache2-utils -y
```
2. Create a user for basic auth
```
sudo htpasswd -c /etc/nginx/.htpasswd truenas
```
3. Create secure directory and key file
```
sudo mkdir -p /srv/keys
head -c 128 /dev/urandom | sudo tee /srv/keys/truenas.key >/dev/null
```
4. Create rate limit zone
```
# /etc/nginx/conf.d/ratelimit.conf
# Allow only 3 key requests per minute per IP
limit_req_zone $binary_remote_addr zone=keylimit:10m rate=3r/m;
```
5. Configure nginx virtual host
```
# /etc/nginx/sites-available/keyserver.conf
server {
listen 443 ssl;
server_name <ip address here>;
# TLS certificate and key (we will set these in the next step)
ssl_certificate /etc/ssl/certs/keyserver.crt;
ssl_certificate_key /etc/ssl/private/keyserver.key;
# Enforce strong TLS
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
# Rate limiting zone created earlier
limit_req zone=keylimit burst=2 nodelay;
location /keys/ {
alias /srv/keys/;
# Basic auth
auth_basic "Restricted";
auth_basic_user_file /etc/nginx/.htpasswd;
# Disable directory listing
autoindex off;
# Prevent caching
add_header Cache-Control "no-store, no-cache, must-revalidate, max-age=0" always;
}
}
```
6. Enable the host:
```
sudo ln -s /etc/nginx/sites-available/keyserver.conf /etc/nginx/sites-enabled/
```
7. Disable default host:
```
sudo rm /etc/nginx/sites-enabled/default
```
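8. Verify the key can be fetched (a quick sanity check; use the IP and the basic-auth password chosen above, and pass -k only while the certificate is self-signed):
```
curl -k -u truenas:<password> https://<ip address here>/keys/truenas.key -o /tmp/truenas.key
```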


@ -1,2 +0,0 @@
[keyserver]
130.162.165.220 ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_ed25519


@ -1,31 +0,0 @@
variable "tls_secret_name" {}
variable "notify_url" {}
resource "kubernetes_namespace" "kured" {
metadata {
name = "kured"
labels = {
"istio-injection" : "disabled"
}
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.kured.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "helm_release" "kured" {
namespace = kubernetes_namespace.kured.metadata[0].name
create_namespace = false
name = "kured"
repository = "https://kubereboot.github.io/charts"
chart = "kured"
values = [templatefile("${path.module}/values.yaml", { notify_url : var.notify_url })]
atomic = true
depends_on = [kubernetes_namespace.kured]
}


@ -1,12 +0,0 @@
window_start: "22:00"
window_end: "06:00"
reboot_days: "mon,tue,wed,thu,fri"
service:
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "8080"
configuration:
notifyUrl: "${notify_url}"


@ -1,93 +0,0 @@
replicaCount: 1
deployment:
image: quay.io/go-skynet/local-ai:latest
env:
threads: 4
context_size: 512
modelsPath: "/models"
resources:
{}
# We usually recommend not specifying default resources, leaving this as a conscious
# choice for the user. This also increases the chances that charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Prompt templates to include
# Note: the keys of this map will be the names of the prompt template files
promptTemplates:
{}
# ggml-gpt4all-j.tmpl: |
# The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
# ### Prompt:
# {{.Input}}
# ### Response:
# Models to download at runtime
models:
# Whether to force download models even if they already exist
forceDownload: false
# The list of URLs to download models from
# Note: the name of the file will be the name of the loaded model
list:
- url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
# basicAuth: base64EncodedCredentials
# Persistent storage for models and prompt templates.
# PVC and HostPath are mutually exclusive. If both are enabled,
# PVC configuration takes precedence. If neither is enabled, ephemeral
# storage is used.
persistence:
pvc:
enabled: false
size: 2Gi
accessModes:
- ReadWriteOnce
annotations: {}
# Optional
storageClass: ~
hostPath:
enabled: false
path: "/models"
service:
type: ClusterIP
port: 80
annotations: {}
# If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout
# service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200"
ingress:
enabled: true
className: "nginx"
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: ai.viktorbarzin.me
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: "${tls_secret}"
hosts:
- ai.viktorbarzin.me
nodeSelector: {}
tolerations: []
affinity: {}


@ -1,21 +0,0 @@
variable "tls_secret_name" {}
resource "helm_release" "prometheus" {
namespace = "localai"
create_namespace = true
name = "localai"
repository = "https://go-skynet.github.io/helm-charts/"
chart = "local-ai"
# version = "15.0.2"
# atomic = true
# cleanup_on_fail = true
values = [templatefile("${path.module}/chart_values.tpl", { tls_secret = var.tls_secret_name })]
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = "localai"
tls_secret_name = var.tls_secret_name
}


@ -1,3 +0,0 @@
# All service modules have been migrated to individual Terragrunt stacks under stacks/.
# See stacks/<service>/main.tf for each service's configuration.
# This file is no longer used.


@ -1,310 +0,0 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "postgresql_password" {}
variable "cookie_secret" {}
variable "captcha_salt" {}
locals {
domain = "mcaptcha.viktorbarzin.me"
port = 7000
}
resource "kubernetes_namespace" "mcaptcha" {
metadata {
name = "mcaptcha"
labels = {
"istio-injection" : "disabled"
tier = var.tier
}
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.mcaptcha.metadata[0].name
tls_secret_name = var.tls_secret_name
}
# mCaptcha requires a special Redis with the mcaptcha/cache module loaded
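# (a stock redis image will not work; "redis-cli MODULE LIST" inside the pod should list the cache module)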
resource "kubernetes_deployment" "mcaptcha_redis" {
metadata {
name = "mcaptcha-redis"
namespace = kubernetes_namespace.mcaptcha.metadata[0].name
labels = {
app = "mcaptcha-redis"
tier = var.tier
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "mcaptcha-redis"
}
}
strategy {
type = "Recreate"
}
template {
metadata {
labels = {
app = "mcaptcha-redis"
}
}
spec {
container {
image = "mcaptcha/cache:latest"
name = "redis"
port {
container_port = 6379
}
resources {
requests = {
memory = "64Mi"
cpu = "25m"
}
limits = {
memory = "128Mi"
cpu = "200m"
}
}
liveness_probe {
tcp_socket {
port = 6379
}
initial_delay_seconds = 10
period_seconds = 10
}
readiness_probe {
tcp_socket {
port = 6379
}
initial_delay_seconds = 5
period_seconds = 5
}
}
}
}
}
}
resource "kubernetes_service" "mcaptcha_redis" {
metadata {
name = "mcaptcha-redis"
namespace = kubernetes_namespace.mcaptcha.metadata[0].name
labels = {
app = "mcaptcha-redis"
}
}
spec {
selector = {
app = "mcaptcha-redis"
}
port {
name = "redis"
port = 6379
target_port = 6379
}
}
}
resource "kubernetes_deployment" "mcaptcha" {
metadata {
name = "mcaptcha"
namespace = kubernetes_namespace.mcaptcha.metadata[0].name
labels = {
app = "mcaptcha"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "mcaptcha"
}
}
strategy {
type = "Recreate"
}
template {
metadata {
labels = {
app = "mcaptcha"
}
annotations = {
"diun.enable" = "true"
"diun.include_tags" = "^\\d+(?:\\.\\d+)?(?:\\.\\d+)?$"
}
}
spec {
container {
image = "mcaptcha/mcaptcha:latest"
name = "mcaptcha"
port {
container_port = local.port
}
# Required configuration
env {
name = "MCAPTCHA_server_DOMAIN"
value = local.domain
}
env {
name = "MCAPTCHA_server_COOKIE_SECRET"
value = var.cookie_secret
}
env {
name = "MCAPTCHA_captcha_SALT"
value = var.captcha_salt
}
# Server configuration
env {
name = "PORT"
value = tostring(local.port)
}
env {
name = "MCAPTCHA_server_IP"
value = "0.0.0.0"
}
env {
name = "MCAPTCHA_server_PROXY_HAS_TLS"
value = "true"
}
# Database configuration (PostgreSQL)
env {
name = "DATABASE_URL"
value = "postgres://mcaptcha:${var.postgresql_password}@postgresql.dbaas.svc.cluster.local:5432/mcaptcha"
}
# Redis configuration (using mcaptcha/cache module)
env {
name = "MCAPTCHA_redis_URL"
value = "redis://mcaptcha-redis.mcaptcha.svc.cluster.local:6379"
}
# Feature flags
env {
name = "MCAPTCHA_allow_registration"
# value = "true"
value = "false"
}
env {
name = "MCAPTCHA_allow_demo"
value = "false"
}
env {
name = "MCAPTCHA_commercial"
value = "false"
}
env {
name = "MCAPTCHA_captcha_ENABLE_STATS"
value = "true"
}
env {
name = "MCAPTCHA_captcha_GC"
value = "30"
}
env {
name = "MCAPTCHA_debug"
value = "false"
}
env {
name = "RUST_BACKTRACE"
value = "1"
}
resources {
requests = {
memory = "64Mi"
cpu = "50m"
}
limits = {
memory = "256Mi"
cpu = "500m"
}
}
# Health checks
liveness_probe {
http_get {
path = "/"
port = local.port
}
initial_delay_seconds = 30
period_seconds = 10
timeout_seconds = 5
failure_threshold = 3
}
readiness_probe {
http_get {
path = "/"
port = local.port
}
initial_delay_seconds = 10
period_seconds = 5
timeout_seconds = 3
failure_threshold = 3
}
}
}
}
}
}
resource "kubernetes_service" "mcaptcha" {
metadata {
name = "mcaptcha"
namespace = kubernetes_namespace.mcaptcha.metadata[0].name
labels = {
"app" = "mcaptcha"
}
}
spec {
selector = {
app = "mcaptcha"
}
port {
name = "http"
port = 80
target_port = local.port
}
}
}
module "ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.mcaptcha.metadata[0].name
name = "mcaptcha"
tls_secret_name = var.tls_secret_name
}


@ -1,400 +0,0 @@
# variable "host" {
# type = string
# }
resource "kubernetes_namespace" "oauth2" {
metadata {
name = "oauth2"
# with istio injection enabled the cookie does not get set and auth fails
# labels = {
# "istio-injection" : "enabled"
# }
}
}
variable "tls_secret_name" {
type = string
}
variable "oauth2_proxy_client_secret" {
type = string
}
variable "oauth2_proxy_client_id" {
type = string
}
variable "authenticated_emails" {
type = string
default = ""
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = "oauth2"
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_config_map" "config" {
metadata {
name = "oauth2-proxy-nginx"
namespace = "oauth2"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"nginx.conf" = <<-EOT
worker_processes 5;
events {
}
http {
server {
listen 80 default_server;
location = /healthcheck {
add_header Content-Type text/plain;
return 200 'ok';
}
location ~ /redirect/(.*) {
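# e.g. a request for /redirect/example.com/foo?x=1 is answered with 307 https://example.com/foo?x=1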
return 307 https://$1$is_args$args;
}
}
}
EOT
}
}
resource "kubernetes_config_map" "authorized-emails" {
metadata {
name = "authorized-emails"
namespace = "oauth2"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"authorized_emails.txt" = var.authenticated_emails
}
}
resource "random_password" "cookie" {
length = 16
special = true
override_special = "_%@"
}
resource "kubernetes_deployment" "oauth2-proxy" {
metadata {
name = "oauth2-proxy"
namespace = "oauth2"
labels = {
app = "oauth2"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "oauth2"
}
}
template {
metadata {
labels = {
app = "oauth2"
}
}
spec {
container {
image = "nginx:latest"
name = "nginx"
port {
name = "http"
container_port = 80
protocol = "TCP"
}
volume_mount {
name = "config"
mount_path = "/etc/nginx/"
}
liveness_probe {
http_get {
path = "/healthcheck"
port = 80
}
}
}
container {
image = "quay.io/pusher/oauth2_proxy:latest"
name = "oauth2-proxy"
args = ["--provider=google", "--upstream=file:///dev/null", "--upstream=http://localhost/redirect/", "--http-address=0.0.0.0:4180", "--cookie-domain=.viktorbarzin.me", "--footer=-", "--authenticated-emails-file=/etc/authorized_emails/authorized_emails.txt"]
# args = ["--provider=google", "--upstream=file:///dev/null", "--upstream=http://localhost/redirect/", "--http-address=0.0.0.0:4180", "--cookie-domain=.viktorbarzin.me", "--footer=-", "--email-domain=*", "--google-group=barzini-lab-admins@googlegroups.com", "--google-admin-email=vbarzin@gmail.com", "--google-service-account-json=/etc/google_service_account/google_service_account.json"]
# args = ["--provider=google", "--upstream=file:///dev/null", "--upstream=http://localhost/redirect/", "--http-address=0.0.0.0:4180", "--cookie-domain=.viktorbarzin.me", "--footer=-", "--email-domain=*", "--google-group=barzini-lab-admins", "--google-admin-email=533122798643-compute@developer.gserviceaccount.com", "--google-service-account-json=/etc/google_service_account/google_service_account.json"]
env {
name = "OAUTH2_PROXY_CLIENT_ID"
value = var.oauth2_proxy_client_id
}
env {
name = "OAUTH2_PROXY_CLIENT_SECRET"
value = var.oauth2_proxy_client_secret
}
env {
name = "OAUTH2_PROXY_COOKIE_SECRET"
value = random_password.cookie.result
}
port {
name = "oauth"
container_port = 4180
protocol = "TCP"
}
volume_mount {
name = "authorized-emails"
mount_path = "/etc/authorized_emails"
}
# volume_mount {
# name = "sa-json"
# mount_path = "/etc/google_service_account/"
# }
}
volume {
name = "config"
config_map {
name = "oauth2-proxy-nginx"
}
}
volume {
name = "authorized-emails"
config_map {
name = "authorized-emails"
}
}
# volume {
# name = "sa-json"
# config_map {
# name = "google-service-account"
# }
# }
}
}
}
}
resource "kubernetes_service" "oauth_proxy" {
metadata {
name = "oauth2"
namespace = "oauth2"
labels = {
app = "oauth2"
}
}
spec {
selector = {
app = "oauth2"
}
port {
name = "http"
port = "80"
target_port = 4180
}
}
}
module "ingress" {
source = "../ingress_factory"
namespace = "oauth2"
name = "oauth2"
tls_secret_name = var.tls_secret_name
}
# variable "svc_name" {
# type = string
# }
# variable "client_id" {}
# variable "client_secret" {}
# resource "kubernetes_deployment" "oauth_proxy" {
# metadata {
# name = "oauth-proxy"
# namespace = var.namespace
# labels = {
# run = "oauth-proxy"
# }
# }
# spec {
# replicas = 1
# selector {
# match_labels = {
# run = "oauth-proxy"
# }
# }
# template {
# metadata {
# labels = {
# run = "oauth-proxy"
# }
# }
# spec {
# container {
# image = "quay.io/oauth2-proxy/oauth2-proxy:latest"
# args = ["--provider=google", "--email-domain=*", "upstream=file:///dev/null", "--http-address=0.0.0.0:4180"]
# name = "oauth-proxy"
# image_pull_policy = "IfNotPresent"
# resources {
# limits = {
# cpu = "0.5"
# memory = "512Mi"
# }
# requests = {
# cpu = "250m"
# memory = "50Mi"
# }
# }
# port {
# container_port = 4180
# }
# env {
# name = "OAUTH2_PROXY_CLIENT_ID"
# value = var.client_id
# }
# env {
# name = "OAUTH2_PROXY_CLIENT_SECRET"
# value = var.client_secret
# }
# env {
# name = "OAUTH2_PROXY_COOKIE_SECRET"
# value = random_password.cookie.result
# }
# }
# }
# }
# }
# }
# resource "kubernetes_service" "oauth_proxy" {
# metadata {
# name = var.svc_name
# namespace = var.namespace
# labels = {
# run = "oauth-proxy"
# }
# }
# spec {
# selector = {
# run = "oauth-proxy"
# }
# port {
# name = "http"
# port = "80"
# target_port = "4180"
# }
# }
# }
# resource "kubernetes_ingress_v1" "oauth" {
# metadata {
# name = "oauth-ingress"
# namespace = var.namespace
# annotations = {
# "kubernetes.io/ingress.class" = "nginx"
# "nginx.ingress.kubernetes.io/use-regex" = "true"
# }
# }
# spec {
# tls {
# hosts = [var.host]
# secret_name = var.tls_secret_name
# }
# rule {
# host = var.host
# http {
# path {
# path = "/oauth2/.*"
# backend {
# service {
# name = var.svc_name
# port {
# number = 80
# }
# }
# }
# }
# }
# }
# }
# }
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# labels:
# k8s-app: oauth2-proxy
# name: oauth2-proxy
# namespace: kube-system
# spec:
# replicas: 1
# selector:
# matchLabels:
# k8s-app: oauth2-proxy
# template:
# metadata:
# labels:
# k8s-app: oauth2-proxy
# spec:
# containers:
# - args:
# - --provider=github
# - --email-domain=*
# - --upstream=file:///dev/null
# - --http-address=0.0.0.0:4180
# # Register a new application
# # https://github.com/settings/applications/new
# env:
# - name: OAUTH2_PROXY_CLIENT_ID
# value: <Client ID>
# - name: OAUTH2_PROXY_CLIENT_SECRET
# value: <Client Secret>
# # docker run -ti --rm python:3-alpine python -c 'import secrets,base64; print(base64.b64encode(base64.b64encode(secrets.token_bytes(16))));'
# - name: OAUTH2_PROXY_COOKIE_SECRET
# value: SECRET
# image: quay.io/oauth2-proxy/oauth2-proxy:latest
# imagePullPolicy: Always
# name: oauth2-proxy
# ports:
# - containerPort: 4180
# protocol: TCP
# ---
# apiVersion: v1
# kind: Service
# metadata:
# labels:
# k8s-app: oauth2-proxy
# name: oauth2-proxy
# namespace: kube-system
# spec:
# ports:
# - name: http
# port: 4180
# protocol: TCP
# targetPort: 4180
# selector:
# k8s-app: oauth2-proxy


@ -1,87 +0,0 @@
variable "tls_secret_name" {}
resource "kubernetes_namespace" "openid_help_page" {
metadata {
name = "openid-help-page"
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = "openid-help-page"
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_deployment" "openid_help_page" {
metadata {
name = "openid-help-page"
namespace = "openid-help-page"
labels = {
app = "openid-help-page"
}
}
spec {
replicas = 3
selector {
match_labels = {
app = "openid-help-page"
}
}
template {
metadata {
labels = {
app = "openid-help-page"
}
}
spec {
container {
image = "viktorbarzin/openid-create-account-help-webpage:latest"
name = "openid-help-page"
resources {
limits = {
cpu = "0.5"
memory = "512Mi"
}
requests = {
cpu = "250m"
memory = "50Mi"
}
}
port {
container_port = 80
}
}
}
}
}
}
resource "kubernetes_service" "openid_help_page" {
metadata {
name = "openid-help-page"
namespace = "openid-help-page"
}
spec {
port {
name = "service-port"
protocol = "TCP"
port = 80
target_port = "80"
}
selector = {
app = "openid-help-page"
}
type = "ClusterIP"
session_affinity = "None"
}
}
module "ingress" {
source = "../ingress_factory"
namespace = "openid-help-page"
name = "openid-help-page"
host = "kubectl"
tls_secret_name = var.tls_secret_name
}


@ -1,201 +0,0 @@
variable "tls_secret_name" {}
variable "web_password" {}
resource "kubernetes_namespace" "pihole" {
metadata {
name = "pihole"
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.pihole.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_config_map" "external_conf" {
metadata {
name = "external-conf"
namespace = kubernetes_namespace.pihole.metadata[0].name
labels = {
app = "pihole"
}
}
data = {
"external.conf" = "$HTTP[\"host\"] == \"pihole.viktorbarzin.me\" {\n server.document-root = \"/var/www/html/admin/\"\n}\n"
}
}
resource "kubernetes_deployment" "pihole" {
metadata {
name = "pihole"
namespace = kubernetes_namespace.pihole.metadata[0].name
labels = {
app = "pihole"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "pihole"
}
}
template {
metadata {
labels = {
app = "pihole"
}
}
spec {
container {
image = "pihole/pihole:latest"
name = "pihole"
resources {
limits = {
cpu = "1"
memory = "1Gi"
}
requests = {
cpu = "1"
memory = "1Gi"
}
}
port {
container_port = 80
}
env {
name = "DNS1"
value = "10.0.20.200#5354" # bind
}
env {
name = "VIRTUAL_HOST"
value = "pihole.viktorbarzin.me"
}
env {
name = "WEBPASSWORD"
value = var.web_password
}
env {
name = "TZ"
value = "Europe/Sofia"
}
volume_mount {
name = "external-conf"
mount_path = "/tmp/external.conf"
sub_path = "external.conf"
}
volume_mount {
name = "pihole-local-etc-volume"
mount_path = "/etc/pihole"
}
volume_mount {
name = "pihole-local-dnsmasq-volume"
mount_path = "/etc/dnsmasq.d"
}
}
volume {
name = "external-conf"
config_map {
name = "external-conf"
}
}
volume {
name = "pihole-local-etc-volume"
empty_dir {} # no hard dependencies on truenas which needs dns
}
volume {
name = "pihole-local-dnsmasq-volume"
empty_dir {} # no hard dependencies on truenas which needs dns
}
}
}
}
}
resource "kubernetes_service" "pihole-dns" {
metadata {
name = "pihole-dns"
namespace = kubernetes_namespace.pihole.metadata[0].name
labels = {
"app" = "pihole"
}
annotations = {
"metallb.universe.tf/allow-shared-ip" : "shared"
}
}
spec {
# type = "LoadBalancer"
# external_traffic_policy = "Cluster"
selector = {
app = "pihole"
}
port {
name = "dns-udp"
port = "53"
protocol = "UDP"
}
}
}
resource "kubernetes_service" "pihole-web" {
metadata {
name = "pihole-web"
namespace = kubernetes_namespace.pihole.metadata[0].name
labels = {
"app" = "pihole"
}
annotations = {
"metallb.universe.tf/allow-shared-ip" : "shared"
}
}
spec {
selector = {
app = "pihole"
}
port {
name = "web"
port = "80"
}
}
}
resource "kubernetes_ingress_v1" "pihole" {
metadata {
name = "pihole-ingress"
namespace = kubernetes_namespace.pihole.metadata[0].name
annotations = {
"traefik.ingress.kubernetes.io/router.middlewares" = "traefik-rate-limit@kubernetescrd,traefik-csp-headers@kubernetescrd,traefik-crowdsec@kubernetescrd"
"traefik.ingress.kubernetes.io/router.entrypoints" = "websecure"
"traefik.ingress.kubernetes.io/router.tls.options" = "traefik-mtls@kubernetescrd"
}
}
spec {
ingress_class_name = "traefik"
tls {
hosts = ["pihole.viktorbarzin.me"]
secret_name = var.tls_secret_name
}
rule {
host = "pihole.viktorbarzin.me"
http {
path {
path = "/"
backend {
service {
name = "pihole-web"
port {
number = 80
}
}
}
}
}
}
}
}


@ -1,23 +0,0 @@
global:
namespace: "vault"
image:
repository: "hashicorp/vault-k8s"
tag: "1.7.0"
agentImage:
repository: "hashicorp/vault"
tag: "1.20.4"
injector:
metrics:
enabled: true
server:
image:
repository: "hashicorp/vault"
tag: "1.20.4"
enabled: true
volumes:
- name: data
emptyDir: {}
ingress:
enabled: false
ui:
enabled: true


@ -1,61 +0,0 @@
variable "tls_secret_name" {}
variable "host" {
default = "vault.viktorbarzin.me"
}
variable "tier" { type = string }
resource "kubernetes_namespace" "vault" {
metadata {
name = "vault"
labels = {
tier = var.tier
}
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.vault.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_persistent_volume" "vault_data" {
metadata {
name = "vault-data-pv"
}
spec {
capacity = {
"storage" = "10Gi"
}
access_modes = ["ReadWriteOnce"]
persistent_volume_source {
nfs {
server = "10.0.10.15"
path = "/mnt/main/vault"
}
}
}
}
resource "helm_release" "vault" {
namespace = kubernetes_namespace.vault.metadata[0].name
name = "vault"
atomic = true
repository = "https://helm.releases.hashicorp.com"
chart = "vault"
values = [templatefile("${path.module}/chart_values.tpl", { host = var.host, tls_secret_name = var.tls_secret_name })]
depends_on = [kubernetes_persistent_volume.vault_data]
}
module "ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.vault.metadata[0].name
name = "vault"
service_name = "vault-ui"
port = 8200
tls_secret_name = var.tls_secret_name
protected = true
}


@ -1,8 +0,0 @@
terraform {
required_providers {
kubernetes = {
source = "hashicorp/kubernetes"
version = "3.0.1"
}
}
}


@ -1,216 +0,0 @@
variable "tls_secret_name" {}
resource "kubernetes_namespace" "vikunja" {
metadata {
name = "vikunja"
}
}
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.vikunja.metadata[0].name
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_deployment" "vikunja" {
metadata {
name = "vikunja"
namespace = kubernetes_namespace.vikunja.metadata[0].name
labels = {
app = "vikunja"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
selector {
match_labels = {
app = "vikunja"
}
}
template {
metadata {
labels = {
app = "vikunja"
}
}
spec {
container {
image = "vikunja/api"
name = "api"
# General settings
env {
name = "VIKUNJA_SERVICE_TIMEZONE"
value = "Europe/London"
}
env {
name = "VIKUNJA_SERVICE_ENABLEREGISTRATION"
value = "true"
}
env {
name = "VIKUNJA_LOG_LEVEL"
value = "DEBUG"
}
# Frontend Settings
env {
name = "VIKUNJA_SERVICE_JWTSECRET"
value = "vikunja"
}
env {
name = "VIKUNJA_SERVICE_FRONTENDURL"
value = "https://todo.viktorbarzin.me/"
}
# DB Settings
env {
name = "VIKUNJA_DATABASE_HOST"
value = "mysql.dbaas.svc.cluster.local"
}
env {
name = "VIKUNJA_DATABASE_PASSWORD"
value = "" # ADD ME
}
env {
name = "VIKUNJA_DATABASE_TYPE"
value = "mysql"
}
env {
name = "VIKUNJA_DATABASE_USER"
value = "vikunja"
}
env {
name = "VIKUNJA_DATABASE_DATABASE"
value = "vikunja"
}
env {
name = "VIKUNJA_LOG_DATABASE"
value = "true"
}
env {
name = "VIKUNJA_LOG_DATABASELEVEL"
value = "DEBUG"
}
# Mailer settings
env {
name = "VIKUNJA_MAILER_ENABLED"
value = "true"
}
env {
name = "VIKUNJA_MAILER_HOST"
value = "mailserver.mailserver.svc.cluster.local"
}
env {
name = "VIKUNJA_MAILER_USERNAME"
value = "me@viktorbarzin.me"
}
env {
name = "VIKUNJA_MAILER_PASSWORD"
value = "" # TODO: add me
}
env {
name = "VIKUNJA_MAILER_FROMEMAIL"
value = "todo@viktorbarzin.me"
}
# TODOIST settings
env {
name = "VIKUNJA_MIGRATION_TODOIST_ENABLE"
value = "true"
}
env {
name = "VIKUNJA_MIGRATION_TODOIST_CLIENTID"
value = "" # TODO: add me
}
env {
name = "VIKUNJA_MIGRATION_TODOIST_CLIENTSECRET"
value = "" # TODO: add me
}
env {
name = "VIKUNJA_MIGRATION_TODOIST_REDIRECTURL"
value = "https://todo.viktorbarzin.me/migrate/todoist"
}
port {
name = "api"
container_port = 3456
}
}
container {
image = "vikunja/frontend"
name = "frontend"
port {
name = "http"
container_port = 80
}
}
}
}
}
}
resource "kubernetes_service" "vikunja" {
metadata {
name = "vikunja"
namespace = kubernetes_namespace.vikunja.metadata[0].name
labels = {
"app" = "vikunja"
}
}
spec {
selector = {
app = "vikunja"
}
port {
name = "http"
target_port = 80
port = 80
protocol = "TCP"
}
}
}
resource "kubernetes_service" "api" {
metadata {
name = "api"
namespace = kubernetes_namespace.vikunja.metadata[0].name
labels = {
"app" = "vikunja"
}
}
spec {
selector = {
app = "vikunja"
}
port {
name = "api"
target_port = 3456
port = 3456
protocol = "TCP"
}
}
}
module "ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.vikunja.metadata[0].name
name = "vikunja"
host = "todo"
tls_secret_name = var.tls_secret_name
}
module "ingress-api" {
source = "../ingress_factory"
namespace = kubernetes_namespace.vikunja.metadata[0].name
name = "vikunja-api"
host = "todo"
service_name = "api"
port = 3456
ingress_path = ["/api/"]
tls_secret_name = var.tls_secret_name
}