Phase 1 - OOM fixes: - dashy: increase memory limit 512Mi→1Gi (was at 99% utilization) - caretta DaemonSet: set explicit resources 300Mi/512Mi (was at 85-98%) - mysql-operator: add Helm resource values 256Mi/512Mi, create namespace with tier label (was at 92% of LimitRange default) - prowlarr, flaresolverr, annas-archive-stacks: add explicit resources (outgrowing 256Mi LimitRange defaults) - real-estate-crawler celery: add resources 512Mi/3Gi (608Mi actual, no explicit resources) Phase 2 - Close quota gaps: - nvidia, real-estate-crawler, trading-bot: remove custom-quota=true labels so Kyverno generates tier-appropriate quotas - descheduler: add tier=1-cluster label for proper classification Phase 3 - Reduce excessive quotas: - monitoring: limits.memory 240Gi→64Gi, limits.cpu 120→64 - woodpecker: limits.memory 128Gi→32Gi, limits.cpu 64→16 - GPU tier default: limits.memory 96Gi→32Gi, limits.cpu 48→16 Phase 4 - Kubelet protection: - Add cpu: 200m to systemReserved and kubeReserved in kubelet template Phase 5 - HA improvements: - cloudflared: add topology spread (ScheduleAnyway) + PDB (maxUnavailable:1) - grafana: add topology spread + PDB via Helm values - crowdsec LAPI: add topology spread + PDB via Helm values - authentik server: add topology spread via Helm values - authentik worker: add topology spread + PDB via Helm values
65 lines
1.5 KiB
YAML
65 lines
1.5 KiB
YAML
# Core authentik application configuration (rendered via Terraform templatefile).
authentik:
  log_level: warning
  # log_level: trace
  secret_key: "${secret_key}"
  # This sends anonymous usage-data, stack traces on errors and
  # performance data to authentik.error-reporting.a7k.io, and is fully opt-in
  error_reporting:
    enabled: true
  postgresql:
    # host: postgresql.dbaas
    host: pgbouncer.authentik
    port: 6432
    user: authentik
    # Quoted: the templated value may contain YAML-special characters
    # (#, :, leading symbols) or be empty, which would break parsing unquoted.
    password: "${postgres_password}"
  redis:
    host: "${redis_host}"
|
# authentik API/UI server deployment settings.
server:
  replicas: 3
  resources:
    requests:
      cpu: 100m
      memory: 512Mi
    limits:
      cpu: "2"
      memory: 1Gi
  # Spread server pods across nodes best-effort (ScheduleAnyway: prefer
  # spreading, but still schedule if the constraint cannot be met).
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: server
  ingress:
    enabled: false
    # hosts:
    #   - authentik.viktorbarzin.me
  podAnnotations:
    # Annotation values must be strings; an unquoted `true` renders as a YAML
    # boolean through the chart's toYaml and is rejected by the API server.
    diun.enable: "true"
    diun.include_tags: "^202[0-9].[0-9]+.*$" # no need to annotate the worker as it uses the same image
  # With 3 replicas, minAvailable: 2 tolerates one voluntary disruption.
  pdb:
    enabled: true
    minAvailable: 2

global:
  addPrometheusAnnotations: true
|
# authentik background worker deployment settings (shares the server image).
worker:
  replicas: 3
  resources:
    requests:
      cpu: 50m
      memory: 384Mi
    limits:
      cpu: "1"
      memory: 1Gi
  # Spread worker pods across nodes best-effort, mirroring the server config.
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: worker
  # maxUnavailable: 1 allows one worker at a time to be drained.
  pdb:
    enabled: true
    maxUnavailable: 1