From c941199f8d4c45b57aca6d9fc70d1f9f570195fe Mon Sep 17 00:00:00 2001
From: Viktor Barzin
Date: Sun, 19 Apr 2026 10:37:30 +0000
Subject: [PATCH] [mailserver] Split Dovecot metrics port onto ClusterIP
 service [ci skip]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Context

Port 9166 (`dovecot-metrics`) was exposed on the public MetalLB LoadBalancer
10.0.20.202 alongside SMTP/IMAP. While only LAN-routable, shipping an internal
metric on the same listening IP as external mail conflated two concerns and
over-exposed the port. Prometheus was scraping via the same LB Service.

Addresses code-izl (follow-up to code-61v, which added the scrape job).

## This change

### mailserver stack

- Drops the `dovecot-metrics` port from `kubernetes_service.mailserver`
  (LoadBalancer keeps: 25, 465, 587, 993).
- Adds a new `kubernetes_service.mailserver_metrics` — ClusterIP-only,
  selecting the same `app=mailserver` pod, exposing 9166.

### monitoring stack

- Updates `extraScrapeConfigs` in the Prometheus chart values to target the
  new `mailserver-metrics.mailserver.svc.cluster.local:9166` instead of
  `mailserver.mailserver.svc.cluster.local:9166`.
- helm_release.prometheus updated in-place; the configmap-reload sidecar
  picked up the new target within 10s.
```
 mailserver LB            mailserver-metrics ClusterIP
┌──────────────────┐      ┌──────────────────┐
│ 25  smtp         │      │ 9166 dovecot-    │
│ 465 smtp-secure  │      │      metrics     │ ← Prometheus only
│ 587 smtp-auth    │      └──────────────────┘
│ 993 imap-secure  │
└──────────────────┘
  ↑ 10.0.20.202
```

## What is NOT in this change

- Per-Service RBAC/NetworkPolicy tightening (separate task)
- Moving the metrics port to a dedicated sidecar-only ServiceMonitor
  (ServiceMonitor CRDs not installed; extraScrapeConfigs is correct for the
  prometheus-community chart in use)

## Test Plan

### Automated

```
$ kubectl get svc -n mailserver
mailserver           LoadBalancer   10.0.20.202      25/TCP,465/TCP,587/TCP,993/TCP
mailserver-metrics   ClusterIP      10.100.102.174   9166/TCP

$ kubectl get endpoints -n mailserver mailserver-metrics
mailserver-metrics   10.10.169.163:9166

$ # Prometheus target (after 10s configmap-reload)
$ kubectl exec -n monitoring deploy/prometheus-server -c prometheus-server -- \
    wget -qO- 'http://localhost:9090/api/v1/targets?scrapePool=mailserver-dovecot'
scrapeUrl: http://mailserver-metrics.mailserver.svc.cluster.local:9166/metrics
health: up
```

### Manual Verification

1. From a host outside the cluster: `nc -vz 10.0.20.202 9166` → connection refused
2. Prometheus UI `/targets` → `mailserver-dovecot` UP, labels show new DNS name
3.
PromQL: `up{job="mailserver-dovecot"}` returns `1` Closes: code-izl Co-Authored-By: Claude Opus 4.7 (1M context) --- stacks/mailserver/modules/mailserver/main.tf | 23 +++++++++++++++++++ .../monitoring/prometheus_chart_values.tpl | 7 +++--- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/stacks/mailserver/modules/mailserver/main.tf b/stacks/mailserver/modules/mailserver/main.tf index 4adcd50f..5367a8aa 100644 --- a/stacks/mailserver/modules/mailserver/main.tf +++ b/stacks/mailserver/modules/mailserver/main.tf @@ -580,6 +580,29 @@ resource "kubernetes_service" "mailserver" { port = 993 target_port = "imap-secure" } + } +} + +# Split the Dovecot metrics port off the public LB and onto its own +# ClusterIP Service. Port 9166 was only LAN-routable via 10.0.20.202 +# but was over-exposed for a Prometheus-internal metric. Addresses +# code-izl. Prometheus scrape target follows in +# stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl +# (updated to `mailserver-metrics.mailserver.svc.cluster.local:9166`). +resource "kubernetes_service" "mailserver_metrics" { + metadata { + name = "mailserver-metrics" + namespace = kubernetes_namespace.mailserver.metadata[0].name + labels = { + app = "mailserver" + } + } + + spec { + type = "ClusterIP" + selector = { + app = "mailserver" + } port { name = "dovecot-metrics" diff --git a/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl b/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl index aac306d9..c083b2a5 100755 --- a/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl +++ b/stacks/monitoring/modules/monitoring/prometheus_chart_values.tpl @@ -2033,11 +2033,12 @@ serverFiles: extraScrapeConfigs: | - job_name: 'mailserver-dovecot' # Dovecot exporter lives on the mailserver pod; port 9166 is exposed by - # the mailserver Service (`dovecot-metrics`). Kube-prometheus-stack (with - # ServiceMonitor CRDs) isn't deployed here, so we scrape by service DNS. 
+ # the dedicated ClusterIP Service `mailserver-metrics` (split from the + # public LB in code-izl). Kube-prometheus-stack (with ServiceMonitor + # CRDs) isn't deployed here, so we scrape by service DNS. static_configs: - targets: - - "mailserver.mailserver.svc.cluster.local:9166" + - "mailserver-metrics.mailserver.svc.cluster.local:9166" metrics_path: '/metrics' scrape_interval: 30s - job_name: 'proxmox-host'