move out all monitoring resources to separate tf files [ci skip]

Viktor Barzin 2025-12-28 20:07:00 +00:00
parent 26d55c6637
commit a595c4db56
6 changed files with 469 additions and 604 deletions


@@ -0,0 +1,113 @@
resource "kubernetes_config_map" "redfish-config" {
metadata {
name = "redfish-exporter-config"
namespace = "monitoring"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"config.yml" = <<-EOF
address: 0.0.0.0
port: 9610
hosts:
  ${var.idrac_host}:
    username: ${var.idrac_username}
    password: ${var.idrac_password}
  default:
    username: root
    password: calvin
metrics:
  all: true
EOF
}
}
resource "kubernetes_deployment" "idrac-redfish" {
metadata {
name = "idrac-redfish-exporter"
namespace = "monitoring"
labels = {
app = "idrac-redfish-exporter"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "idrac-redfish-exporter"
}
}
template {
metadata {
labels = {
app = "idrac-redfish-exporter"
}
}
spec {
container {
# https://github.com/mrlhansen/idrac_exporter?tab=readme-ov-file
image = "ghcr.io/mrlhansen/idrac_exporter:latest"
name = "redfish-exporter"
port {
container_port = 9610
}
volume_mount {
name = "redfish-exporter-config"
mount_path = "/etc/prometheus/idrac.yml"
sub_path = "config.yml"
}
}
volume {
name = "redfish-exporter-config"
config_map {
name = "redfish-exporter-config"
}
}
}
}
}
}
resource "kubernetes_service" "idrac-redfish-exporter" {
metadata {
name = "idrac-redfish-exporter"
namespace = "monitoring"
labels = {
"app" = "idrac-redfish-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = "9090"
}
}
spec {
selector = {
"app" = "idrac-redfish-exporter"
}
port {
name = "http"
port = "9090"
target_port = "9610"
}
}
}
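# For reference: the prometheus.io/* annotations above are what the stock
# "kubernetes-service-endpoints" scrape job of the upstream prometheus chart keys on.
# A rough sketch of that relabelling (assumption: the config rendered from
# prometheus_chart_values.tpl keeps the chart's default job; not verified here):
#
# - job_name: kubernetes-service-endpoints
#   kubernetes_sd_configs:
#     - role: endpoints
#   relabel_configs:
#     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
#       action: keep
#       regex: true
#     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
#       action: replace
#       target_label: __metrics_path__
#       regex: (.+)
#     - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
#       action: replace
#       target_label: __address__
#       regex: ([^:]+)(?::\d+)?;(\d+)
#       replacement: $1:$2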
module "idrac-redfish-exporter-ingress" {
source = "../ingress_factory"
namespace = "monitoring"
name = "idrac-redfish-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
port = 9090
}


@@ -0,0 +1,83 @@
# resource "helm_release" "loki" {
# namespace = "monitoring"
# create_namespace = true
# name = "loki"
# repository = "https://grafana.github.io/helm-charts"
# chart = "loki"
# values = [templatefile("${path.module}/loki.yaml", {})]
# atomic = true
# timeout = 120
# }
# resource "kubernetes_persistent_volume" "loki" {
# metadata {
# name = "loki"
# }
# spec {
# capacity = {
# storage = "15Gi"
# }
# access_modes = ["ReadWriteOnce"]
# persistent_volume_source {
# nfs {
# path = "/mnt/main/loki/loki"
# server = "10.0.10.15"
# }
# }
# persistent_volume_reclaim_policy = "Retain"
# volume_mode = "Filesystem"
# }
# }
# resource "kubernetes_persistent_volume" "loki-minio" {
# metadata {
# name = "loki-minio"
# }
# spec {
# capacity = {
# storage = "15Gi"
# }
# access_modes = ["ReadWriteMany"]
# persistent_volume_source {
# nfs {
# path = "/mnt/main/loki/minio"
# server = "10.0.10.15"
# }
# }
# persistent_volume_reclaim_policy = "Retain"
# volume_mode = "Filesystem"
# }
# }
# https://grafana.com/docs/alloy/latest/configure/kubernetes/
# resource "helm_release" "alloy" {
# namespace = "monitoring"
# create_namespace = true
# name = "alloy"
# repository = "https://grafana.github.io/helm-charts"
# chart = "alloy"
# atomic = true
# }
# Increase open file limits as alloy is reading files:
# https://serverfault.com/questions/1137211/failed-to-create-fsnotify-watcher-too-many-open-files
# run for all nodes using:
# for n in $(kbn | awk '{print $1}'); do echo $n; s wizard@$n 'sudo sysctl -w fs.inotify.max_user_watches=2099999999; sudo sysctl -w fs.inotify.max_user_instances=2099999999;sudo sysctl -w fs.inotify.max_queued_events=2099999999'; done
# resource "helm_release" "k8s-monitoring" {
# namespace = "monitoring"
# create_namespace = true
# name = "k8s-monitoring"
# repository = "https://grafana.github.io/helm-charts"
# chart = "k8s-monitoring"
# values = [templatefile("${path.module}/k8s-monitoring-values.yaml", {})]
# atomic = true
# }


@@ -20,64 +20,6 @@ module "tls_secret" {
namespace = "monitoring"
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_persistent_volume_claim" "prometheus_server_pvc" {
metadata {
name = "prometheus-iscsi-pvc"
namespace = "monitoring"
}
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
storage = "15Gi"
}
}
# storage_class_name = "standard"
volume_name = "prometheus-iscsi-pv"
}
}
resource "kubernetes_persistent_volume" "prometheus_server_pvc" {
metadata {
name = "prometheus-iscsi-pv"
}
spec {
capacity = {
storage = "15Gi"
}
access_modes = ["ReadWriteOnce"]
persistent_volume_source {
nfs {
path = "/mnt/main/prometheus"
server = "10.0.10.15"
}
# iscsi {
# fs_type = "ext4"
# iqn = "iqn.2020-12.lan.viktorbarzin:storage:monitoring:prometheus"
# lun = 0
# target_portal = "iscsi.viktorbarzin.me:3260"
# }
}
persistent_volume_reclaim_policy = "Retain"
volume_mode = "Filesystem"
}
}
resource "helm_release" "prometheus" {
namespace = "monitoring"
create_namespace = true
name = "prometheus"
repository = "https://prometheus-community.github.io/helm-charts"
chart = "prometheus"
# version = "15.0.2"
version = "25.8.2"
values = [templatefile("${path.module}/prometheus_chart_values.tpl", { alertmanager_mail_pass = var.alertmanager_account_password, alertmanager_slack_api_url = var.alertmanager_slack_api_url, tuya_api_key = var.tiny_tuya_service_secret, haos_api_token = var.haos_api_token })]
}
# Terraform gets angry with the 30k-line values file :/ use ansible until solved
# resource "helm_release" "ups_prometheus_snmp_exporter" {
# namespace = "monitoring"
@@ -90,141 +32,7 @@ resource "helm_release" "prometheus" {
# values = [file("${path.module}/ups_snmp_values.yaml")]
# }
# resource "kubernetes_secret" "prometheus_grafana_datasource" {
# metadata {
# name = "prometheus-grafana-datasource"
# namespace = "monitoring"
# labels = {
# grafana_datasource = "1"
# }
# }
# data = {
# "datasource.yaml" = <<EOT
# # config file version
# apiVersion: 1
# # list of datasources that should be deleted from the database
# #deleteDatasources:
# # - name: Prometheus
# # orgId: 1
# # list of datasources to insert/update depending
# # whats available in the database
# datasources:
# # <string, required> name of the datasource. Required
# - name: Prometheus
# # <string, required> datasource type. Required
# type: prometheus
# # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
# access: proxy
# # <int> org id. will default to orgId 1 if not specified
# orgId: 1
# # <string> url
# url: http://prometheus-server
# # <string> database password, if used
# password:
# # <string> database user, if used
# user:
# # <string> database name, if used
# database:
# # <bool> enable/disable basic auth
# basicAuth:
# # <string> basic auth username
# basicAuthUser:
# # <string> basic auth password
# basicAuthPassword:
# # <bool> enable/disable with credentials headers
# withCredentials:
# # <bool> mark as default datasource. Max one per org
# isDefault:
# # <map> fields that will be converted to json and stored in json_data
# #jsonData:
# # graphiteVersion: \"1.1\"
# # tlsAuth: true
# # tlsAuthWithCACert: true
# # <string> json object of data that will be encrypted.
# #secureJsonData:
# # tlsCACert: \"...\"
# # tlsClientCert: \"...\"
# # tlsClientKey: \"...\"
# version: 1
# # <bool> allow users to edit datasources from the UI.
# editable: false
# EOT
# }
# type = "Opaque"
# }
resource "kubernetes_persistent_volume" "prometheus_grafana_pv" {
metadata {
name = "grafana-pv"
}
spec {
capacity = {
"storage" = "2Gi"
}
access_modes = ["ReadWriteOnce"]
persistent_volume_source {
nfs {
path = "/mnt/main/grafana"
server = "10.0.10.15"
}
# iscsi {
# target_portal = "iscsi.viktorbarzin.lan:3260"
# iqn = "iqn.2020-12.lan.viktorbarzin:storage:monitoring:grafana"
# lun = 0
# fs_type = "ext4"
# }
}
}
}
resource "kubernetes_persistent_volume" "alertmanager_pv" {
metadata {
name = "alertmanager-pv"
}
spec {
capacity = {
"storage" = "2Gi"
}
access_modes = ["ReadWriteOnce"]
persistent_volume_source {
nfs {
path = "/mnt/main/alertmanager"
server = "10.0.10.15"
}
}
}
}
resource "kubernetes_persistent_volume_claim" "grafana_pvc" {
metadata {
name = "grafana-pvc"
namespace = "monitoring"
}
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
"storage" = "2Gi"
}
}
}
}
resource "helm_release" "grafana" {
namespace = "monitoring"
create_namespace = true
name = "grafana"
atomic = true
repository = "https://grafana.github.io/helm-charts"
chart = "grafana"
values = [templatefile("${path.module}/grafana_chart_values.yaml", { db_password = var.grafana_db_password })]
}
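# As with prometheus, the grafana-pvc claim is only used if the chart is pointed at it;
# with the upstream grafana chart that is the persistence block, assumed (not verified)
# to be set in grafana_chart_values.yaml roughly as:
#
# persistence:
#   enabled: true
#   existingClaim: grafana-pvc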
resource "kubernetes_cron_job_v1" "monitor_prom" {
metadata {
@@ -324,415 +132,3 @@ resource "kubernetes_ingress_v1" "status_yotovski" {
}
}
resource "kubernetes_config_map" "redfish-config" {
metadata {
name = "redfish-exporter-config"
namespace = "monitoring"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"config.yml" = <<-EOF
address: 0.0.0.0
port: 9610
hosts:
  ${var.idrac_host}:
    username: ${var.idrac_username}
    password: ${var.idrac_password}
  default:
    username: root
    password: calvin
metrics:
  all: true
EOF
}
}
resource "kubernetes_deployment" "idrac-redfish" {
metadata {
name = "idrac-redfish-exporter"
namespace = "monitoring"
labels = {
app = "idrac-redfish-exporter"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "idrac-redfish-exporter"
}
}
template {
metadata {
labels = {
app = "idrac-redfish-exporter"
}
}
spec {
container {
# https://github.com/mrlhansen/idrac_exporter?tab=readme-ov-file
image = "ghcr.io/mrlhansen/idrac_exporter:latest"
name = "redfish-exporter"
port {
container_port = 9610
}
volume_mount {
name = "redfish-exporter-config"
mount_path = "/etc/prometheus/idrac.yml"
sub_path = "config.yml"
}
}
volume {
name = "redfish-exporter-config"
config_map {
name = "redfish-exporter-config"
}
}
}
}
}
}
resource "kubernetes_service" "idrac-redfish-exporter" {
metadata {
name = "idrac-redfish-exporter"
namespace = "monitoring"
labels = {
"app" = "idrac-redfish-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = "9090"
}
}
spec {
selector = {
"app" = "idrac-redfish-exporter"
}
port {
name = "http"
port = "9090"
target_port = "9610"
}
}
}
module "idrac-redfish-exporter-ingress" {
source = "../ingress_factory"
namespace = "monitoring"
name = "idrac-redfish-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
port = 9090
}
/**
1. clone snmp exporter
2. update generator.yaml to include only interesting modules
3. make generate
4. cp snmp.yml to wherever it is used
5. scrape service with curl 'http://snmp-exporter.monitoring.svc.cluster.local:9116/snmp?auth=public_v2&module=huawei&target=192.168.1.5%3A161'
generator reference - https://github.com/prometheus/snmp_exporter/tree/main/generator
https://sbcode.net/prometheus/snmp-generate-huawei/
*/
resource "kubernetes_config_map" "snmp-exporter-yaml" {
metadata {
name = "snmp-exporter-yaml"
namespace = "monitoring"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"snmp.yml" = file("${path.module}/ups_snmp_values.yaml")
}
}
resource "kubernetes_deployment" "snmp-exporter" {
metadata {
name = "snmp-exporter"
namespace = "monitoring"
labels = {
app = "snmp-exporter"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "snmp-exporter"
}
}
template {
metadata {
labels = {
app = "snmp-exporter"
}
}
spec {
container {
image = "prom/snmp-exporter"
name = "snmp-exporter"
# command = ["/usr/local/bin/redfish_exporter", "--config.file", "/app/config.yml"]
port {
container_port = 9116
}
volume_mount {
name = "config-volume"
mount_path = "/etc/snmp_exporter/"
}
}
volume {
name = "config-volume"
config_map {
name = "snmp-exporter-yaml"
}
}
}
}
}
}
resource "kubernetes_service" "snmp-exporter" {
metadata {
name = "snmp-exporter"
namespace = "monitoring"
labels = {
"app" = "snmp-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/snmp?auth=Public0&target=tcp%3A%2F%2F192.%3A161"
"prometheus.io/port" = "9116"
}
}
spec {
selector = {
"app" = "snmp-exporter"
}
port {
name = "http"
port = "9116"
target_port = "9116"
}
}
}
module "snmp-exporter-ingress" {
source = "../ingress_factory"
namespace = "monitoring"
name = "snmp-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
port = 9116
}
resource "kubernetes_secret" "pve_exporter_config" {
metadata {
name = "pve-exporter-config"
namespace = "monitoring"
}
data = {
"pve.yml" = <<-EOF
default:
  user: "root@pam"
  password: ${var.pve_password}
  verify_ssl: false
  timeout: 30
EOF
}
}
resource "kubernetes_deployment" "pve_exporter" {
metadata {
name = "proxmox-exporter"
namespace = "monitoring"
}
spec {
replicas = 1
selector {
match_labels = {
app = "proxmox-exporter"
}
}
template {
metadata {
labels = {
app = "proxmox-exporter"
}
}
spec {
container {
name = "proxmox-exporter"
image = "prompve/prometheus-pve-exporter:latest"
port {
container_port = 9221
}
# Mount the file into the container
volume_mount {
name = "config-volume"
mount_path = "/etc/prometheus"
read_only = true
}
}
volume {
name = "config-volume"
secret {
secret_name = kubernetes_secret.pve_exporter_config.metadata[0].name
items {
key = "pve.yml"
path = "pve.yml" # This results in /etc/prometheus/pve.yml
}
}
}
}
}
}
}
resource "kubernetes_service" "proxmox-exporter" {
metadata {
name = "proxmox-exporter"
namespace = "monitoring"
labels = {
"app" = "proxmox-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/port" = 9221
"prometheus.io/path" = "/pve"
"prometheus.io/param_target" = "192.168.1.127"
"prometheus.io/param_node" = "1"
"prometheus.io/param_cluster" = "1"
}
}
spec {
selector = {
"app" = "proxmox-exporter"
}
port {
name = "http"
port = 9221
target_port = 9221
}
}
}
# To monitor the pve node, use the node exporter and the playbook in this repo. From the repo root run:
# ansible-playbook -i ./playbooks/inventory.ini ./playbooks/deploy_node_exporter.yaml
# This installs the exporter binary
# resource "helm_release" "loki" {
# namespace = "monitoring"
# create_namespace = true
# name = "loki"
# repository = "https://grafana.github.io/helm-charts"
# chart = "loki"
# values = [templatefile("${path.module}/loki.yaml", {})]
# atomic = true
# timeout = 120
# }
# resource "kubernetes_persistent_volume" "loki" {
# metadata {
# name = "loki"
# }
# spec {
# capacity = {
# storage = "15Gi"
# }
# access_modes = ["ReadWriteOnce"]
# persistent_volume_source {
# nfs {
# path = "/mnt/main/loki/loki"
# server = "10.0.10.15"
# }
# }
# persistent_volume_reclaim_policy = "Retain"
# volume_mode = "Filesystem"
# }
# }
# resource "kubernetes_persistent_volume" "loki-minio" {
# metadata {
# name = "loki-minio"
# }
# spec {
# capacity = {
# storage = "15Gi"
# }
# access_modes = ["ReadWriteMany"]
# persistent_volume_source {
# nfs {
# path = "/mnt/main/loki/minio"
# server = "10.0.10.15"
# }
# }
# persistent_volume_reclaim_policy = "Retain"
# volume_mode = "Filesystem"
# }
# }
# https://grafana.com/docs/alloy/latest/configure/kubernetes/
# resource "helm_release" "alloy" {
# namespace = "monitoring"
# create_namespace = true
# name = "alloy"
# repository = "https://grafana.github.io/helm-charts"
# chart = "alloy"
# atomic = true
# }
# Increase open file limits as alloy is reading files:
# https://serverfault.com/questions/1137211/failed-to-create-fsnotify-watcher-too-many-open-files
# run for all nodes using:
# for n in $(kbn | awk '{print $1}'); do echo $n; s wizard@$n 'sudo sysctl -w fs.inotify.max_user_watches=2099999999; sudo sysctl -w fs.inotify.max_user_instances=2099999999;sudo sysctl -w fs.inotify.max_queued_events=2099999999'; done
# resource "helm_release" "k8s-monitoring" {
# namespace = "monitoring"
# create_namespace = true
# name = "k8s-monitoring"
# repository = "https://grafana.github.io/helm-charts"
# chart = "k8s-monitoring"
# values = [templatefile("${path.module}/k8s-monitoring-values.yaml", {})]
# atomic = true
# }


@@ -0,0 +1,58 @@
resource "kubernetes_persistent_volume_claim" "prometheus_server_pvc" {
metadata {
name = "prometheus-iscsi-pvc"
namespace = "monitoring"
}
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
storage = "15Gi"
}
}
# storage_class_name = "standard"
volume_name = "prometheus-iscsi-pv"
}
}
resource "kubernetes_persistent_volume" "prometheus_server_pvc" {
metadata {
name = "prometheus-iscsi-pv"
}
spec {
capacity = {
storage = "15Gi"
}
access_modes = ["ReadWriteOnce"]
persistent_volume_source {
nfs {
path = "/mnt/main/prometheus"
server = "10.0.10.15"
}
# iscsi {
# fs_type = "ext4"
# iqn = "iqn.2020-12.lan.viktorbarzin:storage:monitoring:prometheus"
# lun = 0
# target_portal = "iscsi.viktorbarzin.me:3260"
# }
}
persistent_volume_reclaim_policy = "Retain"
volume_mode = "Filesystem"
}
}
resource "helm_release" "prometheus" {
namespace = "monitoring"
create_namespace = true
name = "prometheus"
repository = "https://prometheus-community.github.io/helm-charts"
chart = "prometheus"
# version = "15.0.2"
version = "25.8.2"
values = [templatefile("${path.module}/prometheus_chart_values.tpl", { alertmanager_mail_pass = var.alertmanager_account_password, alertmanager_slack_api_url = var.alertmanager_slack_api_url, tuya_api_key = var.tiny_tuya_service_secret, haos_api_token = var.haos_api_token })]
}
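# The claim above only takes effect if the chart is told to use it. With the upstream
# prometheus chart that is the server.persistentVolume block; a sketch of what
# prometheus_chart_values.tpl is assumed to set (not verified here):
#
# server:
#   persistentVolume:
#     enabled: true
#     existingClaim: prometheus-iscsi-pvc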


@@ -0,0 +1,103 @@
resource "kubernetes_secret" "pve_exporter_config" {
metadata {
name = "pve-exporter-config"
namespace = "monitoring"
}
data = {
"pve.yml" = <<-EOF
default:
  user: "root@pam"
  password: ${var.pve_password}
  verify_ssl: false
  timeout: 30
EOF
}
}
resource "kubernetes_deployment" "pve_exporter" {
metadata {
name = "proxmox-exporter"
namespace = "monitoring"
}
spec {
replicas = 1
selector {
match_labels = {
app = "proxmox-exporter"
}
}
template {
metadata {
labels = {
app = "proxmox-exporter"
}
}
spec {
container {
name = "proxmox-exporter"
image = "prompve/prometheus-pve-exporter:latest"
port {
container_port = 9221
}
# Mount the file into the container
volume_mount {
name = "config-volume"
mount_path = "/etc/prometheus"
read_only = true
}
}
volume {
name = "config-volume"
secret {
secret_name = kubernetes_secret.pve_exporter_config.metadata[0].name
items {
key = "pve.yml"
path = "pve.yml" # This results in /etc/prometheus/pve.yml
}
}
}
}
}
}
}
resource "kubernetes_service" "proxmox-exporter" {
metadata {
name = "proxmox-exporter"
namespace = "monitoring"
labels = {
"app" = "proxmox-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/port" = 9221
"prometheus.io/path" = "/pve"
"prometheus.io/param_target" = "192.168.1.127"
"prometheus.io/param_node" = "1"
"prometheus.io/param_cluster" = "1"
}
}
spec {
selector = {
"app" = "proxmox-exporter"
}
port {
name = "http"
port = 9221
target_port = 9221
}
}
}
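# The param_* annotations above only matter if the Prometheus config forwards them as URL
# parameters. Expressed as an equivalent static scrape job (a sketch; the job name and the
# exact param handling are assumptions, prometheus-pve-exporter serves metrics on /pve):
#
# - job_name: proxmox
#   metrics_path: /pve
#   params:
#     target: ["192.168.1.127"]
#     cluster: ["1"]
#     node: ["1"]
#   static_configs:
#     - targets: ["proxmox-exporter.monitoring.svc.cluster.local:9221"]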
# To monitor the pve node, use the node exporter and the playbook in this repo. From the repo root run:
# ansible-playbook -i ./playbooks/inventory.ini ./playbooks/deploy_node_exporter.yaml
# This installs the exporter binary


@@ -0,0 +1,112 @@
/**
1. clone snmp exporter
2. update generator.yaml to include only interesting modules
3. make generate
4. cp snmp.yml to wherever it is used
5. scrape service with curl 'http://snmp-exporter.monitoring.svc.cluster.local:9116/snmp?auth=public_v2&module=huawei&target=192.168.1.5%3A161'
generator reference - https://github.com/prometheus/snmp_exporter/tree/main/generator
https://sbcode.net/prometheus/snmp-generate-huawei/
*/
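# A minimal generator.yml sketch for step 2 above (the auth and module names match the
# curl in step 5; the walked OID is only an illustrative assumption - pick the MIB
# subtrees you actually need, see the generator reference for the full schema):
#
# auths:
#   public_v2:
#     version: 2
#     community: public
# modules:
#   huawei:
#     walk:
#       - 1.3.6.1.2.1.2    # interfaces group (ifTable), as an example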
resource "kubernetes_config_map" "snmp-exporter-yaml" {
metadata {
name = "snmp-exporter-yaml"
namespace = "monitoring"
annotations = {
"reloader.stakater.com/match" = "true"
}
}
data = {
"snmp.yml" = file("${path.module}/ups_snmp_values.yaml")
}
}
resource "kubernetes_deployment" "snmp-exporter" {
metadata {
name = "snmp-exporter"
namespace = "monitoring"
labels = {
app = "snmp-exporter"
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "snmp-exporter"
}
}
template {
metadata {
labels = {
app = "snmp-exporter"
}
}
spec {
container {
image = "prom/snmp-exporter"
name = "snmp-exporter"
# command = ["/usr/local/bin/redfish_exporter", "--config.file", "/app/config.yml"]
port {
container_port = 9116
}
volume_mount {
name = "config-volume"
mount_path = "/etc/snmp_exporter/"
}
}
volume {
name = "config-volume"
config_map {
name = "snmp-exporter-yaml"
}
}
}
}
}
}
resource "kubernetes_service" "snmp-exporter" {
metadata {
name = "snmp-exporter"
namespace = "monitoring"
labels = {
"app" = "snmp-exporter"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/snmp?auth=Public0&target=tcp%3A%2F%2F192.%3A161"
"prometheus.io/port" = "9116"
}
}
spec {
selector = {
"app" = "snmp-exporter"
}
port {
name = "http"
port = "9116"
target_port = "9116"
}
}
}
module "snmp-exporter-ingress" {
source = "../ingress_factory"
namespace = "monitoring"
name = "snmp-exporter"
root_domain = "viktorbarzin.lan"
tls_secret_name = var.tls_secret_name
allow_local_access_only = true
ssl_redirect = false
port = 9116
}