Right-size CPU requests cluster-wide and remove missed CPU limits

Increase requests for under-requested pods (dashy 50m→250m, frigate 500m→1500m,
clickhouse 100m→500m, otp 100m→300m, linkwarden 25m→50m, authentik worker 50m→100m).

Reduce requests for over-requested pods (crowdsec agent/lapi 500m→25m each,
prometheus 200m→100m, dbaas mysql 1800m→100m, pg-cluster 250m→50m,
shlink-web 250m→10m, gpu-pod-exporter 50m→10m, stirling-pdf 100m→25m,
technitium 100m→25m, celery 50m→15m). Reduce crowdsec quota from 8→1 CPU.

Remove CPU limits that were previously missed in the prometheus (cpu: "2") and dbaas (cpu: "3600m") tpl files.
This commit is contained in:
Viktor Barzin 2026-03-14 09:22:24 +00:00
parent b00f810d3d
commit 2102cb2d73
17 changed files with 28 additions and 18 deletions

View file

@@ -70,7 +70,7 @@ resource "kubernetes_deployment" "dashy" {
resources {
requests = {
cpu = "50m"
cpu = "250m"
memory = "512Mi"
}
limits = {

View file

@@ -85,7 +85,7 @@ resource "kubernetes_deployment" "frigate" {
resources {
requests = {
cpu = "500m"
cpu = "1500m"
memory = "2Gi"
}
limits = {

View file

@@ -106,7 +106,7 @@ resource "kubernetes_deployment" "linkwarden" {
}
resources {
requests = {
cpu = "25m"
cpu = "50m"
memory = "256Mi"
}
limits = {

View file

@@ -254,7 +254,7 @@ resource "kubernetes_deployment" "otp" {
}
resources {
requests = {
cpu = "100m"
cpu = "300m"
memory = "2Gi"
}
limits = {

View file

@@ -47,7 +47,7 @@ worker:
replicas: 3
resources:
requests:
cpu: 50m
cpu: 100m
memory: 384Mi
limits:
memory: 1Gi

View file

@@ -365,7 +365,7 @@ resource "kubernetes_resource_quota" "crowdsec" {
}
spec {
hard = {
"requests.cpu" = "8"
"requests.cpu" = "1"
"requests.memory" = "8Gi"
"limits.memory" = "16Gi"
pods = "30"

View file

@@ -2,6 +2,12 @@
container_runtime: containerd
agent:
resources:
requests:
cpu: 25m
memory: 64Mi
limits:
memory: 512Mi
priorityClassName: "tier-1-cluster"
# To specify each pod you want to process it logs (pods present in the node)
acquisition:
@@ -44,6 +50,12 @@ agent:
configMap:
name: crowdsec-whitelist
lapi:
resources:
requests:
cpu: 25m
memory: 128Mi
limits:
memory: 1Gi
priorityClassName: "tier-1-cluster"
replicas: 3
topologySpreadConstraints:

View file

@@ -11,7 +11,6 @@ podSpec:
resources:
requests:
memory: "1024Mi" # adapt to your needs
cpu: "1800m" # adapt to your needs
cpu: "100m" # adapt to your needs
limits:
memory: "2048Mi" # adapt to your needs
cpu: "3600m" # adapt to your needs

View file

@@ -867,7 +867,7 @@ resource "null_resource" "pg_cluster" {
storageClass: iscsi-truenas
resources:
requests:
cpu: "250m"
cpu: "50m"
memory: "512Mi"
limits:
memory: "4Gi"

View file

@@ -145,10 +145,9 @@ server:
retention: "52w"
resources:
requests:
cpu: 200m
cpu: 100m
memory: 1Gi
limits:
cpu: "2"
memory: 4Gi
strategy:
type: Recreate

View file

@@ -613,7 +613,7 @@ resource "kubernetes_daemonset" "gpu_pod_exporter" {
resources {
requests = {
cpu = "50m"
cpu = "10m"
memory = "128Mi"
}
limits = {

View file

@@ -105,7 +105,7 @@ resource "kubernetes_deployment" "technitium_secondary" {
}
resources {
requests = {
cpu = "100m"
cpu = "25m"
memory = "128Mi"
}
limits = {

View file

@@ -165,7 +165,7 @@ resource "kubernetes_deployment" "technitium" {
name = "technitium"
resources {
requests = {
cpu = "100m"
cpu = "25m"
memory = "128Mi"
}
limits = {

View file

@@ -321,7 +321,7 @@ resource "kubernetes_deployment" "realestate-crawler-celery" {
command = ["python", "-m", "celery", "-A", "celery_app", "worker", "--loglevel=info", "--pool=threads"]
resources {
requests = {
cpu = "50m"
cpu = "15m"
memory = "512Mi"
}
limits = {

View file

@@ -115,7 +115,7 @@ resource "kubernetes_deployment" "clickhouse" {
}
resources {
requests = {
cpu = "100m"
cpu = "500m"
memory = "512Mi"
}
limits = {

View file

@@ -57,7 +57,7 @@ resource "kubernetes_deployment" "stirling-pdf" {
name = "stirling-pdf"
resources {
requests = {
cpu = "100m"
cpu = "25m"
memory = "512Mi"
}
limits = {

View file

@@ -283,7 +283,7 @@ resource "kubernetes_deployment" "shlink-web" {
memory = "512Mi"
}
requests = {
cpu = "250m"
cpu = "10m"
memory = "50Mi"
}
}