- NodeDown now suppresses workload and service alerts (PodCrashLooping, DeploymentReplicasMismatch, StatefulSetReplicasMismatch, etc.) - NFSServerUnresponsive suppresses pod-level alerts - Increased `for:` durations on transient alerts (e.g. 15m→30m for replica mismatches) - NodeDown `for:` raised 1m→3m to avoid flapping - Removed all 3 Loki log-based alerts (they duplicated existing Prometheus alerts) - Downgraded HeadscaleDown critical→warning, mail server page→warning
137 lines
3.2 KiB
HCL
137 lines
3.2 KiB
HCL
# NOTE(review): not referenced by any resource visible in this file — confirm
# it is consumed elsewhere in the module (e.g. a templated values file) before
# removing.
variable "nfs_server" {
  type = string
}
# Loki log-aggregation backend, installed from the official Grafana chart.
resource "helm_release" "loki" {
  namespace        = kubernetes_namespace.monitoring.metadata[0].name
  create_namespace = true
  name             = "loki"

  repository = "https://grafana.github.io/helm-charts"
  chart      = "loki"

  # Empty vars map today; templatefile() is kept so variables can be threaded
  # into loki.yaml later without changing this call site.
  values = [templatefile("${path.module}/loki.yaml", {})]

  # Allow up to 10 minutes for the rollout to settle.
  timeout = 600

  # Roll back automatically on a failed install/upgrade, matching the alloy
  # release in this file.
  atomic = true

  # The alert-rules config map must exist before this release is applied.
  depends_on = [kubernetes_config_map.loki_alert_rules]
}
# Grafana Alloy collector.
# https://grafana.com/docs/alloy/latest/configure/kubernetes/
resource "helm_release" "alloy" {
  name             = "alloy"
  namespace        = kubernetes_namespace.monitoring.metadata[0].name
  create_namespace = true

  repository = "https://grafana.github.io/helm-charts"
  chart      = "alloy"

  # Static values file — no template interpolation needed here.
  values = [file("${path.module}/alloy.yaml")]

  # Roll back automatically if the install/upgrade fails.
  atomic = true

  # Install only after the Loki release is in place.
  depends_on = [helm_release.loki]
}
# DaemonSet that raises inotify sysctl limits on every node: a privileged
# init container applies the settings, then a minimal pause container keeps
# the pod alive so the DaemonSet reports healthy.
resource "kubernetes_daemon_set_v1" "sysctl-inotify" {
  metadata {
    name      = "sysctl-inotify"
    namespace = kubernetes_namespace.monitoring.metadata[0].name

    labels = {
      app = "sysctl-inotify"
    }
  }

  spec {
    selector {
      match_labels = {
        app = "sysctl-inotify"
      }
    }

    template {
      metadata {
        labels = {
          app = "sysctl-inotify"
        }
      }

      spec {
        # Runs once per pod start and writes the inotify limits.
        init_container {
          name  = "sysctl"
          image = "busybox:1.37"

          command = [
            "sh", "-c",
            "sysctl -w fs.inotify.max_user_watches=1048576 && sysctl -w fs.inotify.max_user_instances=8192 && sysctl -w fs.inotify.max_queued_events=1048576"
          ]

          # sysctl writes need a privileged container.
          security_context {
            privileged = true
          }
        }

        # Does nothing; a pod needs at least one long-running container.
        container {
          name  = "pause"
          image = "registry.k8s.io/pause:3.10"

          # Near-zero footprint: requests == limits.
          resources {
            requests = {
              cpu    = "1m"
              memory = "4Mi"
            }

            limits = {
              cpu    = "1m"
              memory = "4Mi"
            }
          }
        }

        # Share the host PID namespace.
        host_pid = true

        # Tolerate every taint so the pod lands on all nodes.
        toleration {
          operator = "Exists"
        }

        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}
# NOTE(review): disabled — presumably superseded by the separate loki/alloy
# releases above; confirm before deleting.
# resource "helm_release" "k8s-monitoring" {
#   namespace        = kubernetes_namespace.monitoring.metadata[0].name
#   create_namespace = true
#   name             = "k8s-monitoring"
#
#   repository = "https://grafana.github.io/helm-charts"
#   chart      = "k8s-monitoring"
#
#   values = [templatefile("${path.module}/k8s-monitoring-values.yaml", {})]
#   atomic = true
# }
# Loki ruler rules.  The rule set is intentionally empty: the Loki log-based
# alerts were removed because they duplicated existing Prometheus alerts.
# The config map itself is kept because the loki helm_release declares a
# depends_on reference to it.
resource "kubernetes_config_map" "loki_alert_rules" {
  metadata {
    name      = "loki-alert-rules"
    namespace = kubernetes_namespace.monitoring.metadata[0].name
  }

  data = {
    "rules.yaml" = yamlencode({ groups = [] })
  }
}
# Grafana datasource definition pointing at the in-cluster Loki service.
resource "kubernetes_config_map" "grafana_loki_datasource" {
  metadata {
    name      = "grafana-loki-datasource"
    namespace = kubernetes_namespace.monitoring.metadata[0].name

    labels = {
      # NOTE(review): assumes a Grafana provisioning sidecar watches for this
      # label — confirm against the Grafana deployment's config.
      grafana_datasource = "1"
    }
  }

  data = {
    "loki-datasource.yaml" = yamlencode({
      apiVersion = 1

      datasources = [
        {
          name      = "Loki"
          type      = "loki"
          access    = "proxy"
          url       = "http://loki.monitoring.svc.cluster.local:3100"
          isDefault = false
        },
      ]
    })
  }
}