extract dbaas, authentik, crowdsec from platform into independent stacks [ci skip]

Phase 1 of splitting the platform stack, enabling parallel CI apply runs.
All 3 modules were fully independent (no cross-module refs).
State migrated via terraform state mv. All 3 stacks applied
with zero changes (dbaas had pre-existing ResourceQuota drift).
Woodpecker pipeline updated to run extracted stacks in parallel.
This commit is contained in:
Viktor Barzin 2026-03-17 18:11:53 +00:00
parent c8b42f78df
commit 3c804aedf8
28 changed files with 2306 additions and 57 deletions

View file

@ -32,9 +32,9 @@ steps:
kubernetes:
resources:
requests:
memory: 1Gi
limits:
memory: 2Gi
limits:
memory: 4Gi
commands:
- "apk update && apk add curl unzip git openssh-client"
# Install Terraform
@ -45,8 +45,13 @@ steps:
- "chmod 755 /usr/local/bin/terragrunt"
# Source Vault token
- "source .vault-env"
# Apply platform stack (core infrastructure services)
# Apply extracted stacks in parallel (slow modules)
- "cd stacks/dbaas && terragrunt apply --non-interactive -auto-approve &"
- "cd stacks/authentik && terragrunt apply --non-interactive -auto-approve &"
- "cd stacks/crowdsec && terragrunt apply --non-interactive -auto-approve &"
# Apply platform stack (remaining core infrastructure services)
- "cd stacks/platform && terragrunt apply --non-interactive -auto-approve"
- "wait"
- name: cleanup-and-push
image: alpine

25
stacks/authentik/main.tf Normal file
View file

@ -0,0 +1,25 @@
# =============================================================================
# Authentik Stack — Identity provider (SSO)
# =============================================================================
# Extracted from the platform stack so CI can apply it in parallel.
variable "tls_secret_name" { type = string }
variable "redis_host" { type = string }

# All platform-wide secrets live under a single Vault KV v2 entry.
data "vault_kv_secret_v2" "secrets" {
  mount = "secret"
  name  = "platform"
}

locals {
  # JSON-encoded map: service name -> credential map, used for homepage widgets.
  homepage_credentials = jsondecode(data.vault_kv_secret_v2.secrets.data["homepage_credentials"])
}

module "authentik" {
  source            = "./modules/authentik"
  tier              = local.tiers.cluster # from tiers.tf (Terragrunt-generated)
  tls_secret_name   = var.tls_secret_name
  secret_key        = data.vault_kv_secret_v2.secrets.data["authentik_secret_key"]
  postgres_password = data.vault_kv_secret_v2.secrets.data["authentik_postgres_password"]
  redis_host        = var.redis_host
  # try(): fall back to "" when no homepage token has been provisioned yet.
  homepage_token = try(local.homepage_credentials["authentik"]["token"], "")
}

View file

@ -0,0 +1,88 @@
# -----------------------------------------------------------------------------
# Authentik module: namespace + custom quota, Helm release, and ingresses for
# the goauthentik identity provider (web UI + embedded outpost).
# -----------------------------------------------------------------------------
variable "tls_secret_name" { type = string }
variable "secret_key" {
  type      = string
  sensitive = true # Authentik's cryptographic secret key
}
variable "postgres_password" {
  type      = string
  sensitive = true # password for the "authentik" PostgreSQL user
}
variable "tier" { type = string }
variable "redis_host" { type = string }
variable "homepage_token" {
  type      = string
  default   = ""
  sensitive = true
}

# Copy the shared wildcard TLS secret into this namespace.
module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.authentik.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_namespace" "authentik" {
  metadata {
    name = "authentik"
    labels = {
      tier = var.tier
      # Opt out of the default tier quota; a custom one is defined below.
      "resource-governance/custom-quota" = "true"
    }
  }
}

resource "kubernetes_resource_quota" "authentik" {
  metadata {
    name      = "authentik-quota"
    namespace = kubernetes_namespace.authentik.metadata[0].name
  }
  spec {
    hard = {
      "requests.cpu"    = "16"
      "requests.memory" = "16Gi"
      "limits.memory"   = "96Gi"
      pods              = "50"
    }
  }
}

resource "helm_release" "authentik" {
  namespace = kubernetes_namespace.authentik.metadata[0].name
  # Redundant while the managed namespace above exists, but harmless.
  create_namespace = true
  name             = "goauthentik"
  repository       = "https://charts.goauthentik.io/"
  chart            = "authentik"
  # version = "2025.8.1"
  version = "2025.10.3"
  atomic  = true
  timeout = 6000
  values  = [templatefile("${path.module}/values.yaml", { postgres_password = var.postgres_password, secret_key = var.secret_key, redis_host = var.redis_host })]
}

# Main web UI ingress, with homepage widget annotations.
module "ingress" {
  source          = "../../../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.authentik.metadata[0].name
  name            = "authentik"
  service_name    = "goauthentik-server"
  tls_secret_name = var.tls_secret_name
  extra_annotations = {
    "gethomepage.dev/enabled"      = "true"
    "gethomepage.dev/name"         = "Authentik"
    "gethomepage.dev/description"  = "Identity provider"
    "gethomepage.dev/icon"         = "authentik.png"
    "gethomepage.dev/group"        = "Identity & Security"
    "gethomepage.dev/pod-selector" = ""
    "gethomepage.dev/widget.type"  = "authentik"
    "gethomepage.dev/widget.url"   = "http://goauthentik-server.authentik.svc.cluster.local"
    "gethomepage.dev/widget.key"   = var.homepage_token
  }
}

# Embedded outpost: forward-auth endpoints served under /outpost.goauthentik.io.
module "ingress-outpost" {
  source          = "../../../../modules/kubernetes/ingress_factory"
  namespace       = kubernetes_namespace.authentik.metadata[0].name
  name            = "authentik-outpost"
  host            = "authentik"
  service_name    = "ak-outpost-authentik-embedded-outpost"
  port            = 9000
  ingress_path    = ["/outpost.goauthentik.io"]
  tls_secret_name = var.tls_secret_name
}

View file

@ -0,0 +1,14 @@
; PgBouncer config rendered by Terraform templatefile(); ${password} is the
; authentik PostgreSQL password.
[databases]
; Proxy logical DB "authentik" to the shared dbaas PostgreSQL service.
authentik = host=postgresql.dbaas port=5432 dbname=authentik user=authentik password=${password}

[pgbouncer]
listen_addr = 0.0.0.0
listen_port = 6432
auth_type = md5
auth_file = /etc/pgbouncer/userlist.txt
; transaction pooling: server connection is released at transaction end
pool_mode = transaction
max_client_conn = 200
default_pool_size = 20
reserve_pool_size = 5
; seconds a client waits before the reserve pool may be used
reserve_pool_timeout = 5
; tolerate this startup parameter instead of rejecting the client
ignore_startup_parameters = extra_float_digits

View file

@ -0,0 +1,140 @@
# PgBouncer configuration rendered from the pgbouncer.ini template.
# NOTE(review): the rendered file embeds the Postgres password but lives in a
# ConfigMap — consider moving pgbouncer.ini into a Secret alongside userlist.txt.
resource "kubernetes_config_map" "pgbouncer_config" {
  metadata {
    name      = "pgbouncer-config"
    namespace = "authentik"
  }
  data = {
    "pgbouncer.ini" = templatefile("${path.module}/pgbouncer.ini", { password = var.postgres_password })
  }
}
# --- 2 Secret for user credentials ---
# Rendered userlist.txt used as pgbouncer's auth_file (see pgbouncer.ini).
resource "kubernetes_secret" "pgbouncer_auth" {
  metadata {
    name      = "pgbouncer-auth"
    namespace = "authentik"
  }
  data = {
    "userlist.txt" = templatefile("${path.module}/userlist.txt", { password = var.postgres_password })
  }
  type = "Opaque"
}
# --- 3 Deployment ---
# PgBouncer connection pooler in front of the dbaas PostgreSQL; Authentik
# connects through it (values.yaml: postgresql.host = pgbouncer.authentik).
resource "kubernetes_deployment" "pgbouncer" {
  metadata {
    name      = "pgbouncer"
    namespace = "authentik"
    labels = {
      app  = "pgbouncer"
      tier = var.tier
    }
  }
  spec {
    replicas = 3
    selector {
      match_labels = {
        app = "pgbouncer"
      }
    }
    template {
      metadata {
        labels = {
          app = "pgbouncer"
        }
      }
      spec {
        affinity {
          pod_anti_affinity {
            # NOTE(review): this selector matches pods labelled component=server,
            # not other pgbouncer replicas (app=pgbouncer). If the intent was to
            # spread the 3 replicas across nodes, the selector should probably
            # match app=pgbouncer — confirm.
            required_during_scheduling_ignored_during_execution {
              label_selector {
                match_expressions {
                  key      = "component"
                  operator = "In"
                  values   = ["server"]
                }
              }
              topology_key = "kubernetes.io/hostname"
            }
          }
        }
        container {
          name = "pgbouncer"
          # NOTE(review): floating :latest tag — consider pinning for
          # reproducible rollouts.
          image             = "edoburu/pgbouncer:latest"
          image_pull_policy = "IfNotPresent"
          port {
            container_port = 6432
          }
          volume_mount {
            name       = "config"
            mount_path = "/etc/pgbouncer/pgbouncer.ini"
            sub_path   = "pgbouncer.ini"
          }
          volume_mount {
            name       = "auth"
            mount_path = "/etc/pgbouncer/userlist.txt"
            sub_path   = "userlist.txt"
          }
          # NOTE(review): host "postgres" here disagrees with the mounted
          # pgbouncer.ini ("postgresql.dbaas"). Presumably the mounted config
          # file wins and this env var is dead — confirm against the image's
          # entrypoint and remove whichever is unused.
          env {
            name  = "DATABASES_AUTHENTIK"
            value = "host=postgres port=5432 dbname=authentik user=authentik password=${var.postgres_password}"
          }
        }
        volume {
          name = "config"
          config_map {
            name = kubernetes_config_map.pgbouncer_config.metadata[0].name
          }
        }
        volume {
          name = "auth"
          secret {
            secret_name = kubernetes_secret.pgbouncer_auth.metadata[0].name
          }
        }
        # Override the pod's resolv.conf ndots setting.
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
  # Redundant: the volume reference above already implies this dependency.
  depends_on = [kubernetes_secret.pgbouncer_auth]
}
# --- 4 Service ---
# ClusterIP service exposing PgBouncer as pgbouncer.authentik:6432.
resource "kubernetes_service" "pgbouncer" {
  metadata {
    name      = "pgbouncer"
    namespace = "authentik"
  }
  spec {
    selector = {
      app = "pgbouncer"
    }
    port {
      port        = 6432
      target_port = 6432
      protocol    = "TCP"
    }
    type = "ClusterIP"
  }
}

View file

@ -0,0 +1 @@
"authentik" "${password}"

View file

@ -0,0 +1,73 @@
# Helm values for the goauthentik chart, rendered via templatefile();
# ${secret_key}, ${postgres_password} and ${redis_host} are Terraform inputs.
authentik:
  log_level: warning
  # log_level: trace
  secret_key: "${secret_key}"
  # This sends anonymous usage-data, stack traces on errors and
  # performance data to authentik.error-reporting.a7k.io, and is fully opt-in
  error_reporting:
    enabled: true
  postgresql:
    # Connections go through the local PgBouncer pooler, not straight to dbaas.
    # host: postgresql.dbaas
    host: pgbouncer.authentik
    port: 6432
    user: authentik
    password: ${postgres_password}
  redis:
    host: ${redis_host}
server:
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  resources:
    requests:
      cpu: 100m
      memory: 1Gi
    limits:
      memory: 1Gi
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: server
  ingress:
    # Ingress is managed by Terraform (ingress_factory), not the chart.
    enabled: false
    # hosts:
    #   - authentik.viktorbarzin.me
  podAnnotations:
    # NOTE(review): Kubernetes annotation values must be strings; this bare
    # boolean relies on the chart quoting it during templating — confirm.
    diun.enable: true
    diun.include_tags: "^202[0-9].[0-9]+.*$" # no need to annotate the worker as it uses the same image
  pdb:
    enabled: true
    minAvailable: 2
global:
  addPrometheusAnnotations: true
worker:
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  resources:
    requests:
      cpu: 100m
      memory: 896Mi
    limits:
      memory: 896Mi
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: worker
  pdb:
    enabled: true
    maxUnavailable: 1

1
stacks/authentik/secrets Symbolic link
View file

@ -0,0 +1 @@
../../secrets

View file

@ -0,0 +1,8 @@
# Terragrunt wrapper for the authentik stack.
include "root" {
  path = find_in_parent_folders()
}

# Ordering-only dependency: apply the infra stack first.
# skip_outputs = true because no outputs are consumed from it.
dependency "infra" {
  config_path  = "../infra"
  skip_outputs = true
}

10
stacks/authentik/tiers.tf Normal file
View file

@ -0,0 +1,10 @@
# Generated by Terragrunt. Sig: nIlQXj57tbuaRZEa
locals {
tiers = {
core = "0-core"
cluster = "1-cluster"
gpu = "2-gpu"
edge = "3-edge"
aux = "4-aux"
}
}

30
stacks/crowdsec/main.tf Normal file
View file

@ -0,0 +1,30 @@
# =============================================================================
# CrowdSec Stack — Security/WAF
# =============================================================================
# Extracted from the platform stack so CI can apply it in parallel.
variable "tls_secret_name" { type = string }
variable "mysql_host" { type = string }

data "vault_kv_secret_v2" "secrets" {
  mount = "secret"
  name  = "platform"
}

locals {
  # JSON-encoded map: service name -> credential map, used for homepage widgets.
  homepage_credentials = jsondecode(data.vault_kv_secret_v2.secrets.data["homepage_credentials"])
}

module "crowdsec" {
  source          = "./modules/crowdsec"
  tier            = local.tiers.cluster # from tiers.tf (Terragrunt-generated)
  tls_secret_name = var.tls_secret_name
  mysql_host      = var.mysql_host
  # Direct indexing (unlike authentik's try()): the plan fails fast if the
  # "crowdsec" entry is missing from homepage_credentials.
  homepage_username              = local.homepage_credentials["crowdsec"]["username"]
  homepage_password              = local.homepage_credentials["crowdsec"]["password"]
  enroll_key                     = data.vault_kv_secret_v2.secrets.data["crowdsec_enroll_key"]
  db_password                    = data.vault_kv_secret_v2.secrets.data["crowdsec_db_password"]
  crowdsec_dash_api_key          = data.vault_kv_secret_v2.secrets.data["crowdsec_dash_api_key"]
  crowdsec_dash_machine_id       = data.vault_kv_secret_v2.secrets.data["crowdsec_dash_machine_id"]
  crowdsec_dash_machine_password = data.vault_kv_secret_v2.secrets.data["crowdsec_dash_machine_password"]
  slack_webhook_url              = data.vault_kv_secret_v2.secrets.data["alertmanager_slack_api_url"]
}

View file

@ -0,0 +1,44 @@
# Reference values for wiring the CrowdSec Lua bouncer plugin into
# ingress-nginx (controller.* keys target the ingress-nginx chart).
# NOTE(review): <API KEY> and <your-captcha-*> are unfilled placeholders, and
# this file is not rendered via templatefile() here — it appears to be a
# sample/reference file rather than live configuration; confirm before use.
controller:
  extraVolumes:
    - name: crowdsec-bouncer-plugin
      emptyDir: {}
  extraInitContainers:
    - name: init-clone-crowdsec-bouncer
      image: crowdsecurity/lua-bouncer-plugin
      imagePullPolicy: IfNotPresent
      env:
        - name: API_URL
          value: "http://crowdsec-service.crowdsec.svc.cluster.local:8080" # crowdsec lapi service-name
        - name: API_KEY
          value: "<API KEY>" # generated with `cscli bouncers add -n <bouncer_name>
        - name: BOUNCER_CONFIG
          value: "/crowdsec/crowdsec-bouncer.conf"
        - name: CAPTCHA_PROVIDER
          value: "recaptcha" # valid providers are recaptcha, hcaptcha, turnstile
        - name: SECRET_KEY
          value: "<your-captcha-secret-key>" # If you want captcha support otherwise remove this ENV VAR
        - name: SITE_KEY
          value: "<your-captcha-site-key>" # If you want captcha support otherwise remove this ENV VAR
        - name: BAN_TEMPLATE_PATH
          value: /etc/nginx/lua/plugins/crowdsec/templates/ban.html
        - name: CAPTCHA_TEMPLATE_PATH
          value: /etc/nginx/lua/plugins/crowdsec/templates/captcha.html
      command:
        [
          "sh",
          "-c",
          "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/",
        ]
      volumeMounts:
        - name: crowdsec-bouncer-plugin
          mountPath: /lua_plugins
  extraVolumeMounts:
    - name: crowdsec-bouncer-plugin
      mountPath: /etc/nginx/lua/plugins/crowdsec
      subPath: crowdsec
  config:
    plugins: "crowdsec"
    lua-shared-dicts: "crowdsec_cache: 50m"
    server-snippet: |
      lua_ssl_trusted_certificate "/etc/ssl/certs/ca-certificates.crt"; # If you want captcha support otherwise remove this line
      resolver local=on ipv6=off;

View file

@ -0,0 +1,376 @@
# --- Inputs ------------------------------------------------------------------
# Types added to previously-untyped variables and secrets marked sensitive,
# consistent with the crowdsec_dash_* declarations below.
variable "tls_secret_name" { type = string }
variable "homepage_username" { type = string }
variable "homepage_password" {
  type      = string
  sensitive = true
}
variable "db_password" {
  type      = string
  sensitive = true # MySQL password shared by the LAPI and Metabase dashboard
}
variable "enroll_key" {
  type      = string
  sensitive = true # CrowdSec console enrollment key
}
variable "crowdsec_dash_api_key" {
  type      = string
  sensitive = true
}
variable "crowdsec_dash_machine_id" { type = string } # used for web dash
variable "crowdsec_dash_machine_password" {
  type      = string
  sensitive = true
}
variable "tier" { type = string }
variable "slack_webhook_url" {
  type      = string
  sensitive = true # webhook URLs grant posting access; treat as a secret
}
# Copy the shared wildcard TLS secret into the crowdsec namespace.
module "tls_secret" {
  source          = "../../../../modules/kubernetes/setup_tls_secret"
  namespace       = kubernetes_namespace.crowdsec.metadata[0].name
  tls_secret_name = var.tls_secret_name
}

resource "kubernetes_namespace" "crowdsec" {
  metadata {
    name = "crowdsec"
    labels = {
      tier = var.tier
      # Opt out of the default tier quota; a custom one is defined below.
      "resource-governance/custom-quota" = "true"
    }
  }
}
# Custom detection scenarios, mounted into the agents via the chart's
# extraVolumeMounts (see values.yaml). Keys are file names under
# /etc/crowdsec/scenarios/.
# Fix: the second map entry used HCL's legacy ":" key assignment while the
# first used "=" — normalized to "=" for consistency.
resource "kubernetes_config_map" "crowdsec_custom_scenarios" {
  metadata {
    name      = "crowdsec-custom-scenarios"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      "app.kubernetes.io/name" = "crowdsec"
    }
  }
  data = {
    "http-403-abuse.yaml" = <<-YAML
      type: leaky
      name: crowdsecurity/http-403-abuse
      description: "Detect IPs triggering too many HTTP 403s in NGINX ingress logs"
      filter: "evt.Meta.log_type == 'http_access-log' && evt.Parsed.status == '403'"
      groupby: "evt.Meta.source_ip"
      leakspeed: "2s"
      capacity: 10
      blackhole: 5m
      labels:
        service: http
        behavior: abusive_403
        remediation: true
    YAML
    "http-429-abuse.yaml" = <<-YAML
      type: leaky
      name: crowdsecurity/http-429-abuse
      description: "Detect IPs repeatedly triggering rate-limit (HTTP 429)"
      filter: "evt.Meta.log_type == 'http_access-log' && evt.Parsed.status == '429'"
      groupby: "evt.Meta.source_ip"
      leakspeed: "10s"
      capacity: 5
      blackhole: 1m
      labels:
        service: http
        behavior: rate_limit_abuse
        remediation: true
    YAML
  }
}
# Whitelist for trusted IPs that should never be blocked
# Mounted into the agents at /etc/crowdsec/parsers/s02-enrich/ (see values.yaml).
resource "kubernetes_config_map" "crowdsec_whitelist" {
  metadata {
    name      = "crowdsec-whitelist"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      "app.kubernetes.io/name" = "crowdsec"
    }
  }
  data = {
    "whitelist.yaml" = <<-YAML
      name: crowdsecurity/whitelist-trusted-ips
      description: "Whitelist for trusted IPs that should never be blocked"
      whitelist:
        reason: "Trusted IP - never block"
        ip:
          - "176.12.22.76"
    YAML
  }
}
# CrowdSec Helm chart (LAPI + per-node agents + Metabase dashboard).
# Credentials are injected into values.yaml via templatefile().
resource "helm_release" "crowdsec" {
  namespace = kubernetes_namespace.crowdsec.metadata[0].name
  # Redundant while the managed namespace above exists, but harmless.
  create_namespace = true
  name             = "crowdsec"
  atomic           = true
  version          = "0.21.0"
  repository       = "https://crowdsecurity.github.io/helm-charts"
  chart            = "crowdsec"
  values           = [templatefile("${path.module}/values.yaml", { homepage_username = var.homepage_username, homepage_password = var.homepage_password, DB_PASSWORD = var.db_password, ENROLL_KEY = var.enroll_key, SLACK_WEBHOOK_URL = var.slack_webhook_url, mysql_host = var.mysql_host })]
  timeout          = 900
  wait             = true
  wait_for_jobs    = true
}
# Deployment for my custom dashboard that helps me unblock myself when I blocklist myself
resource "kubernetes_deployment" "crowdsec-web" {
  metadata {
    name      = "crowdsec-web"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      app                             = "crowdsec_web"
      "kubernetes.io/cluster-service" = "true"
      tier                            = var.tier
    }
  }
  spec {
    replicas = 1
    strategy {
      type = "RollingUpdate"
    }
    selector {
      match_labels = {
        app = "crowdsec_web"
      }
    }
    template {
      metadata {
        labels = {
          app                             = "crowdsec_web"
          "kubernetes.io/cluster-service" = "true"
        }
      }
      spec {
        priority_class_name = "tier-1-cluster"
        container {
          name = "crowdsec-web"
          # NOTE(review): untagged image (implicit :latest) — consider pinning.
          image = "viktorbarzin/crowdsec_web"
          # LAPI endpoint plus credentials; passed as plain env vars.
          # NOTE(review): consider sourcing these from a Secret via env_from.
          env {
            name  = "CS_API_URL"
            value = "http://crowdsec-service.crowdsec.svc.cluster.local:8080/v1"
          }
          env {
            name  = "CS_API_KEY"
            value = var.crowdsec_dash_api_key
          }
          env {
            name  = "CS_MACHINE_ID"
            value = var.crowdsec_dash_machine_id
          }
          env {
            name  = "CS_MACHINE_PASSWORD"
            value = var.crowdsec_dash_machine_password
          }
          port {
            name           = "http"
            container_port = 8000
            protocol       = "TCP"
          }
          resources {
            requests = {
              cpu    = "15m"
              memory = "128Mi"
            }
            limits = {
              memory = "128Mi"
            }
          }
        }
        # Override the pod's resolv.conf ndots setting.
        dns_config {
          option {
            name  = "ndots"
            value = "2"
          }
        }
      }
    }
  }
}
# ClusterIP service exposing the unblock dashboard on port 80 -> 8000.
resource "kubernetes_service" "crowdsec-web" {
  metadata {
    name      = "crowdsec-web"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      "app" = "crowdsec_web"
    }
  }
  spec {
    selector = {
      app = "crowdsec_web"
    }
    port {
      # Numeric ports, consistent with the pgbouncer/other services in this
      # repo (previously quoted strings that Terraform converted implicitly).
      port        = 80
      target_port = 8000
    }
  }
}
# Public ingress for the unblock dashboard.
# exclude_crowdsec = true so users blocked by CrowdSec can still reach the
# page that lets them unblock themselves.
module "ingress" {
  source           = "../../../../modules/kubernetes/ingress_factory"
  namespace        = kubernetes_namespace.crowdsec.metadata[0].name
  name             = "crowdsec-web"
  protected        = true
  tls_secret_name  = var.tls_secret_name
  exclude_crowdsec = true
  rybbit_site_id   = "d09137795ccc"
}
# CronJob to import public blocklists into CrowdSec
# https://github.com/wolffcatskyy/crowdsec-blocklist-import
# Uses kubectl exec to run in an existing CrowdSec agent pod that's already registered
resource "kubernetes_cron_job_v1" "crowdsec_blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
    labels = {
      app  = "crowdsec-blocklist-import"
      tier = var.tier
    }
  }
  spec {
    # Run daily at 4 AM
    schedule                      = "0 4 * * *"
    timezone                      = "Europe/London"
    concurrency_policy            = "Forbid"
    successful_jobs_history_limit = 3
    failed_jobs_history_limit     = 3
    job_template {
      metadata {
        labels = {
          app = "crowdsec-blocklist-import"
        }
      }
      spec {
        backoff_limit = 3
        template {
          metadata {
            labels = {
              app = "crowdsec-blocklist-import"
            }
          }
          spec {
            # RBAC below grants only pods get/list + pods/exec create.
            service_account_name = kubernetes_service_account.blocklist_import.metadata[0].name
            restart_policy       = "OnFailure"
            container {
              name = "blocklist-import"
              # NOTE(review): floating :latest tag; also verify this image
              # ships curl, which the script below depends on.
              image   = "bitnami/kubectl:latest"
              command = ["/bin/bash", "-c"]
              args = [
                <<-EOF
                set -e
                echo "Finding CrowdSec agent pod..."
                AGENT_POD=$(kubectl get pods -n crowdsec -l k8s-app=crowdsec,type=agent -o jsonpath='{.items[0].metadata.name}')
                if [ -z "$AGENT_POD" ]; then
                echo "ERROR: Could not find CrowdSec agent pod"
                exit 1
                fi
                echo "Using agent pod: $AGENT_POD"
                # Download the import script
                echo "Downloading blocklist import script..."
                curl -fsSL -o /tmp/import.sh \
                https://raw.githubusercontent.com/wolffcatskyy/crowdsec-blocklist-import/main/import.sh
                chmod +x /tmp/import.sh
                # Copy script to agent pod and execute
                echo "Copying script to agent pod and executing..."
                kubectl cp /tmp/import.sh crowdsec/$AGENT_POD:/tmp/import.sh
                kubectl exec -n crowdsec "$AGENT_POD" -- /bin/bash -c '
                set -e
                # Run with native mode since we are inside the CrowdSec container
                export MODE=native
                export DECISION_DURATION=24h
                export FETCH_TIMEOUT=60
                export LOG_LEVEL=INFO
                /tmp/import.sh
                # Cleanup
                rm -f /tmp/import.sh
                '
                echo "Blocklist import completed successfully!"
                EOF
              ]
            }
          }
        }
      }
    }
  }
}
# Service account for the blocklist import job (needs kubectl exec permissions)
resource "kubernetes_service_account" "blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
}

# Least-privilege role: list pods (to locate an agent) and exec into them.
resource "kubernetes_role" "blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
  rule {
    api_groups = [""]
    resources  = ["pods"]
    verbs      = ["get", "list"]
  }
  rule {
    api_groups = [""]
    resources  = ["pods/exec"]
    verbs      = ["create"]
  }
}

resource "kubernetes_role_binding" "blocklist_import" {
  metadata {
    name      = "crowdsec-blocklist-import"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "Role"
    name      = kubernetes_role.blocklist_import.metadata[0].name
  }
  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.blocklist_import.metadata[0].name
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
}
# Custom ResourceQuota for CrowdSec needs more than default 1-cluster quota
# because it runs DaemonSet agents (1 per worker node) + 3 LAPI replicas + web UI
resource "kubernetes_resource_quota" "crowdsec" {
  metadata {
    name      = "crowdsec-quota"
    namespace = kubernetes_namespace.crowdsec.metadata[0].name
  }
  spec {
    hard = {
      "requests.cpu"    = "4"
      "requests.memory" = "8Gi"
      "limits.memory"   = "16Gi"
      pods              = "30"
    }
  }
}

View file

@ -0,0 +1,226 @@
# values from - https://github.com/crowdsecurity/helm-charts/blob/main/charts/crowdsec/values.yaml
container_runtime: containerd
agent:
  resources:
    requests:
      cpu: 25m
      memory: 64Mi
    limits:
      memory: 512Mi
  priorityClassName: "tier-1-cluster"
  # To specify each pod you want to process it logs (pods present in the node)
  acquisition:
    # The namespace where the pod is located
    - namespace: traefik
      # The pod name
      podName: traefik-*
      # as in crowdsec configuration, we need to specify the program name so the parser will match and parse logs
      program: traefik
  # Those are ENV variables
  env:
    # As it's a test, we don't want to share signals with CrowdSec so disable the Online API.
    # - name: DISABLE_ONLINE_API
    #   value: "true"
    # As we are running Traefik, we want to install the Traefik collection
    - name: COLLECTIONS
      value: "crowdsecurity/traefik crowdsecurity/base-http-scenarios crowdsecurity/http-cve"
    - name: SCENARIOS
      value: ""
      # value: "crowdsecurity/http-crawl-aggressive"
  # Mount custom scenarios into /etc/crowdsec/scenarios
  # Fix: Kubernetes volumeMounts use camelCase "readOnly"; the previous
  # lowercase "readonly" is not a valid field and is dropped by the API server.
  extraVolumeMounts:
    - name: custom-scenarios
      mountPath: /etc/crowdsec/scenarios/http-403-abuse.yaml
      subPath: "http-403-abuse.yaml"
      readOnly: true
    - name: custom-scenarios
      mountPath: /etc/crowdsec/scenarios/http-429-abuse.yaml
      subPath: "http-429-abuse.yaml"
      readOnly: true
    - name: whitelist
      mountPath: /etc/crowdsec/parsers/s02-enrich/whitelist.yaml
      subPath: "whitelist.yaml"
      readOnly: true
  extraVolumes:
    - name: custom-scenarios
      configMap:
        name: crowdsec-custom-scenarios
    - name: whitelist
      configMap:
        name: crowdsec-whitelist
# Local API (LAPI): central decision store backed by MySQL (see config below).
lapi:
  resources:
    requests:
      cpu: 25m
      memory: 128Mi
    limits:
      memory: 1Gi
  startupProbe:
    httpGet:
      path: /health
      port: 8080
    failureThreshold: 30
    periodSeconds: 10
  priorityClassName: "tier-1-cluster"
  replicas: 3
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: crowdsec
          type: lapi
  pdb:
    enabled: true
    maxUnavailable: 1
  extraSecrets:
    dbPassword: "${DB_PASSWORD}"
  storeCAPICredentialsInSecret: true
  # Stateless LAPI pods: state lives in the external MySQL database.
  persistentVolume:
    config:
      enabled: false
    data:
      enabled: false
  env:
    - name: ENROLL_KEY
      value: "${ENROLL_KEY}"
    - name: ENROLL_INSTANCE_NAME
      value: "k8s-cluster"
    - name: ENROLL_TAGS
      value: "k8s linux"
    # DB password is consumed from the chart-managed secret, not inline.
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:
          name: crowdsec-lapi-secrets
          key: dbPassword
    # As it's a test, we don't want to share signals with CrowdSec, so disable the Online API.
    # - name: DISABLE_ONLINE_API
    #   value: "true"
# Metabase dashboard, backed by the same external MySQL host.
dashboard:
  enabled: true
  env:
    - name: MB_DB_TYPE
      value: "mysql"
    - name: MB_DB_DBNAME
      value: crowdsec-metabase
    - name: MB_DB_USER
      value: "crowdsec"
    # NOTE(review): plaintext password in rendered values; it also ends up in
    # the pod spec as a plain env var.
    - name: MB_DB_PASS
      value: "${DB_PASSWORD}"
    - name: MB_DB_HOST
      value: "${mysql_host}"
    - name: MB_EMAIL_SMTP_USERNAME
      value: "info@viktorbarzin.me"
    - name: MB_EMAIL_FROM_ADDRESS
      value: "info@viktorbarzin.me"
    - name: MB_EMAIL_SMTP_HOST
      value: "mailserver.mailserver.svc.cluster.local"
    - name: MB_EMAIL_SMTP_PASSWORD
      value: "" # Ignore for now as it's unclear what notifications we can get
    - name: MB_EMAIL_SMTP_PORT
      value: "587"
    - name: MB_EMAIL_SMTP_SECURITY
      value: "starttls"
  ingress:
    enabled: true
    # Authentik forward-auth protects the dashboard; homepage widget
    # annotations expose it on the homepage.
    annotations:
      nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
      #nginx.ingress.kubernetes.io/auth-url: "https://oauth2.viktorbarzin.me/oauth2/auth"
      nginx.ingress.kubernetes.io/auth-url: "http://ak-outpost-authentik-embedded-outpost.authentik.svc.cluster.local:9000/outpost.goauthentik.io/auth/nginx"
      # nginx.ingress.kubernetes.io/auth-signin: "https://oauth2.viktorbarzin.me/oauth2/start?rd=/redirect/$http_host$escaped_request_uri"
      nginx.ingress.kubernetes.io/auth-signin: "https://authentik.viktorbarzin.me/outpost.goauthentik.io/start?rd=$scheme%3A%2F%2F$host$escaped_request_uri"
      nginx.ingress.kubernetes.io/auth-response-headers: "Set-Cookie,X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid"
      nginx.ingress.kubernetes.io/auth-snippet: "proxy_set_header X-Forwarded-Host $http_host;"
      gethomepage.dev/enabled: "true"
      gethomepage.dev/description: "Web Application Firewall"
      gethomepage.dev/icon: "crowdsec.png"
      gethomepage.dev/name: "CrowdSec"
      gethomepage.dev/group: "Identity & Security"
      gethomepage.dev/widget.type: "crowdsec"
      gethomepage.dev/widget.url: "http://crowdsec-service.crowdsec.svc.cluster.local:8080"
      gethomepage.dev/widget.username: "${homepage_username}"
      gethomepage.dev/widget.password: "${homepage_password}"
      gethomepage.dev/pod-selector: ""
    ingressClassName: "nginx"
    host: "crowdsec.viktorbarzin.me"
    tls:
      - hosts:
          - crowdsec.viktorbarzin.me
        secretName: "tls-secret"
metrics:
  enabled: true
strategy:
  type: RollingUpdate
config:
  # Custom profiles: captcha for rate limiting, ban for attacks
  # (multi-document YAML; the first matching profile with on_success: break wins)
  profiles.yaml: |
    # Captcha for rate limiting and 403 abuse (user can unblock themselves)
    name: captcha_remediation
    filters:
      - Alert.Remediation == true && Alert.GetScope() == "Ip" && Alert.GetScenario() in ["crowdsecurity/http-429-abuse", "crowdsecurity/http-403-abuse", "crowdsecurity/http-crawl-non_statics", "crowdsecurity/http-sensitive-files"]
    decisions:
      - type: captcha
        duration: 4h
    notifications:
      - slack_alerts
    on_success: break
    ---
    # Default: Ban for serious attacks (CVE exploits, scanners, brute force)
    name: default_ip_remediation
    filters:
      - Alert.Remediation == true && Alert.GetScope() == "Ip"
    decisions:
      - type: ban
        duration: 4h
    notifications:
      - slack_alerts
    on_success: break
    ---
    name: default_range_remediation
    filters:
      - Alert.Remediation == true && Alert.GetScope() == "Range"
    decisions:
      - type: ban
        duration: 4h
    notifications:
      - slack_alerts
    on_success: break
  # $$ escapes Terraform templatefile interpolation: the rendered file
  # contains a literal ${REGISTRATION_TOKEN} for the chart/crowdsec to expand.
  config.yaml.local: |
    db_config:
      type: mysql
      user: crowdsec
      password: ${DB_PASSWORD}
      db_name: crowdsec
      host: ${mysql_host}
      port: 3306
    api:
      server:
        auto_registration: # Activate if not using TLS for authentication
          enabled: true
          token: "$${REGISTRATION_TOKEN}" # /!\ do not change
          allowed_ranges: # /!\ adapt to the pod IP ranges used by your cluster
            - "127.0.0.1/32"
            - "192.168.0.0/16"
            - "10.0.0.0/8"
            - "172.16.0.0/12"
  notifications:
    slack.yaml: |
      type: slack
      name: slack_alerts
      log_level: info
      format: |
        :rotating_light: *CrowdSec Alert*
        {{range .}}
        *Scenario:* {{.Alert.Scenario}}
        *Source IP:* {{.Alert.Source.IP}} ({{.Alert.Source.Cn}})
        *Decisions:*
        {{range .Alert.Decisions}} - {{.Type}} for {{.Duration}} (scope: {{.Scope}}, value: {{.Value}})
        {{end}}
        {{end}}
      webhook: ${SLACK_WEBHOOK_URL}

1
stacks/crowdsec/secrets Symbolic link
View file

@ -0,0 +1 @@
../../secrets

View file

@ -0,0 +1,8 @@
# Terragrunt wrapper for the crowdsec stack.
include "root" {
  path = find_in_parent_folders()
}

# Ordering-only dependency: apply the infra stack first.
# skip_outputs = true because no outputs are consumed from it.
dependency "infra" {
  config_path  = "../infra"
  skip_outputs = true
}

10
stacks/crowdsec/tiers.tf Normal file
View file

@ -0,0 +1,10 @@
# Generated by Terragrunt. Sig: nIlQXj57tbuaRZEa
locals {
tiers = {
core = "0-core"
cluster = "1-cluster"
gpu = "2-gpu"
edge = "3-edge"
aux = "4-aux"
}
}

27
stacks/dbaas/main.tf Normal file
View file

@ -0,0 +1,27 @@
# =============================================================================
# DBaaS Stack — MySQL + PostgreSQL + pgAdmin
# =============================================================================
# Extracted from the platform stack so CI can apply it in parallel.
variable "tls_secret_name" { type = string }
variable "nfs_server" { type = string }
# Toggles production-only behaviour inside the dbaas module.
variable "prod" {
  type    = bool
  default = false
}

data "vault_kv_secret_v2" "secrets" {
  mount = "secret"
  name  = "platform"
}

module "dbaas" {
  source                   = "./modules/dbaas"
  prod                     = var.prod
  tls_secret_name          = var.tls_secret_name
  nfs_server               = var.nfs_server
  dbaas_root_password      = data.vault_kv_secret_v2.secrets.data["dbaas_root_password"]
  postgresql_root_password = data.vault_kv_secret_v2.secrets.data["dbaas_postgresql_root_password"]
  pgadmin_password         = data.vault_kv_secret_v2.secrets.data["dbaas_pgadmin_password"]
  # NOTE(review): var.kube_config_path is not declared in this file —
  # presumably declared in a sibling .tf of this stack; verify.
  kube_config_path = var.kube_config_path
  tier             = local.tiers.cluster # from tiers.tf (Terragrunt-generated)
}

View file

@ -0,0 +1,16 @@
# MySQL cluster values rendered via templatefile(); ${root_password} is the
# dbaas root password from Vault.
tls:
  useSelfSigned: true
credentials:
  root:
    password: ${root_password}
    user: root
# Single instance: no MySQL-level HA.
serverInstances: 1
podSpec:
  containers:
    - name: mysql
      resources:
        requests:
          memory: "1024Mi" # adapt to your needs
          cpu: "100m" # adapt to your needs
        limits:
          memory: "2048Mi" # adapt to your needs

View file

@ -0,0 +1,30 @@
# presslabs mysql-operator cluster definition for the dbaas namespace.
apiVersion: mysql.presslabs.org/v1alpha1
kind: MysqlCluster
metadata:
  name: mysql-cluster
  namespace: dbaas
spec:
  # NOTE(review): MySQL 5.7 is end-of-life upstream; replicas: 1 means no HA.
  mysqlVersion: "5.7"
  replicas: 1
  secretName: cluster-secret
  mysqlConf:
    # read_only: 0 # mysql forms a single transaction for each sql statement, autocommit for each statement
    # automatic_sp_privileges: "ON" # automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine
    # auto_generate_certs: "ON" # Auto Generation of Certificate
    # auto_increment_increment: 1 # Auto Incrementing value from +1
    # auto_increment_offset: 1 # Auto Increment Offset
    # binlog-format: "STATEMENT" # contains various options such ROW(SLOW,SAFE) STATEMENT(FAST,UNSAFE), MIXED(combination of both)
    # wait_timeout: 31536000 # 28800 number of seconds the server waits for activity on a non-interactive connection before closing it, You might encounter MySQL server has gone away error, you then tweak this value acccordingly
    # interactive_timeout: 28800 # The number of seconds the server waits for activity on an interactive connection before closing it.
    # max_allowed_packet: "512M" # Maximum size of MYSQL Network protocol packet that the server can create or read 4MB, 8MB, 16MB, 32MB
    # max-binlog-size: 1073741824 # binary logs contains the events that describe database changes, this parameter describe size for the bin_log file.
    # log_output: "TABLE" # Format in which the logout will be dumped
    # master-info-repository: "TABLE" # Format in which the master info will be dumped
    # relay_log_info_repository: "TABLE" # Format in which the relay info will be dumped
  volumeSpec:
    persistentVolumeClaim:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,14 @@
---
# mysql-operator orchestrator values rendered via templatefile().
orchestrator:
  # persistence:
  #   enabled: false
  ingress:
    # NOTE(review): most charts spell this key "enabled" — confirm this chart
    # actually reads "enable"; if not, the key does nothing.
    enable: false
    # hosts/tls below are inert while the ingress is disabled.
    hosts:
      - host: db.viktorbarzin.me
        paths:
          - path: /
    tls:
      - secretName: ${secretName}
        hosts:
          - db.viktorbarzin.me

View file

@ -0,0 +1,30 @@
# PostGIS 16 base image with the pgvector extension built from source and the
# pgvecto.rs ("vectors") extension installed from a .deb (multi-stage build).
# NOTE(review): the "binary" stage pulls pgvector/pgvector, but the file copied
# is /pgvecto-rs-binary-release.deb — that deb ships in tensorchord pgvecto-rs
# images, not pgvector/pgvector. Verify this stage before rebuilding.
# Fix: "as" -> "AS" (consistent Dockerfile keyword casing).
FROM pgvector/pgvector:0.8.0-pg16 AS binary
FROM postgis/postgis:16-master
COPY --from=binary /pgvecto-rs-binary-release.deb /tmp/vectors.deb
RUN apt-get install -y /tmp/vectors.deb && rm -f /tmp/vectors.deb
# Install necessary packages, build pgvector v0.8.0 from source, then strip
# the build toolchain in the same layer to keep the image small.
# NOTE(review): postgresql-16-pgvector is also installed via apt above the
# source build — one of the two is likely redundant.
RUN apt-get update \
  && apt-get install -y --no-install-recommends \
  build-essential \
  libpq-dev \
  wget \
  git \
  postgresql-server-dev-16 \
  postgresql-16-pgvector \
  # Clean up to reduce layer size
  && rm -rf /var/lib/apt/lists/* \
  && cd /tmp \
  && git clone --branch v0.8.0 https://github.com/pgvector/pgvector.git \
  && cd pgvector \
  && make \
  && make install \
  # Clean up unnecessary files
  && cd - \
  && apt-get purge -y --auto-remove build-essential postgresql-server-dev-16 libpq-dev wget git \
  && rm -rf /tmp/pgvector
# Copy initialization scripts
#COPY ./docker-entrypoint-initdb.d/ /docker-entrypoint-initdb.d/
CMD ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", "search_path=\"$user\", public, vectors", "-c", "logging_collector=on"]

View file

@ -0,0 +1,9 @@
# Provider requirements for this stack. The kubectl provider block is
# intentionally commented out — re-enable if kubectl_manifest resources are
# reintroduced into the dbaas module.
# terraform {
#   required_providers {
#     kubectl = {
#       source = "gavinbunney/kubectl"
#       version = ">= 1.10.0"
#     }
#   }
#   required_version = ">= 0.13"
# }

1
stacks/dbaas/secrets Symbolic link
View file

@ -0,0 +1 @@
../../secrets

View file

@ -0,0 +1,8 @@
# Terragrunt wrapper for the dbaas stack.
include "root" {
  path = find_in_parent_folders()
}

# Ordering-only dependency: apply the infra stack first.
# skip_outputs = true because no outputs are consumed from it.
dependency "infra" {
  config_path  = "../infra"
  skip_outputs = true
}

10
stacks/dbaas/tiers.tf Normal file
View file

@ -0,0 +1,10 @@
# Generated by Terragrunt. Sig: nIlQXj57tbuaRZEa
locals {
tiers = {
core = "0-core"
cluster = "1-cluster"
gpu = "2-gpu"
edge = "3-edge"
aux = "4-aux"
}
}

View file

@ -2,15 +2,18 @@
# Platform Stack — Core & Cluster Services
# =============================================================================
#
# This stack groups ~22 core/cluster services that form the platform layer.
# This stack groups core/cluster services that form the platform layer.
# These services are always present (no DEFCON gating) and provide the
# foundational infrastructure that application stacks depend on.
#
# Services included:
# metallb, dbaas, cloudflared, infra-maintenance,
# redis, traefik, technitium, headscale, authentik, rbac, k8s-portal,
# crowdsec, monitoring, vaultwarden, reverse-proxy, metrics-server, vpa,
# metallb, cloudflared, infra-maintenance,
# redis, traefik, technitium, headscale, rbac, k8s-portal,
# monitoring, vaultwarden, reverse-proxy, metrics-server, vpa,
# nvidia, kyverno, uptime-kuma, wireguard, xray, mailserver
#
# Extracted to independent stacks:
# dbaas, authentik, crowdsec
# =============================================================================
# -----------------------------------------------------------------------------
@ -31,10 +34,6 @@ variable "postgresql_host" { type = string }
variable "mysql_host" { type = string }
variable "ollama_host" { type = string }
variable "mail_host" { type = string }
variable "prod" {
type = bool
default = false
}
variable "k8s_ca_cert" {
type = string
default = ""
@ -88,21 +87,6 @@ module "metallb" {
tier = local.tiers.core
}
# -----------------------------------------------------------------------------
# DBaaS — MySQL + PostgreSQL + pgAdmin
# -----------------------------------------------------------------------------
module "dbaas" {
source = "./modules/dbaas"
prod = var.prod
tls_secret_name = var.tls_secret_name
nfs_server = var.nfs_server
dbaas_root_password = data.vault_kv_secret_v2.secrets.data["dbaas_root_password"]
postgresql_root_password = data.vault_kv_secret_v2.secrets.data["dbaas_postgresql_root_password"]
pgadmin_password = data.vault_kv_secret_v2.secrets.data["dbaas_pgadmin_password"]
kube_config_path = var.kube_config_path
tier = local.tiers.cluster
}
# -----------------------------------------------------------------------------
# Redis — Shared Redis instance
# -----------------------------------------------------------------------------
@ -153,19 +137,6 @@ module "headscale" {
tier = local.tiers.core
}
# -----------------------------------------------------------------------------
# Authentik — Identity provider (SSO)
# -----------------------------------------------------------------------------
module "authentik" {
source = "./modules/authentik"
tier = local.tiers.cluster
tls_secret_name = var.tls_secret_name
secret_key = data.vault_kv_secret_v2.secrets.data["authentik_secret_key"]
postgres_password = data.vault_kv_secret_v2.secrets.data["authentik_postgres_password"]
redis_host = var.redis_host
homepage_token = try(local.homepage_credentials["authentik"]["token"], "")
}
# -----------------------------------------------------------------------------
# RBAC — Kubernetes OIDC RBAC (depends on Authentik)
# -----------------------------------------------------------------------------
@ -187,24 +158,6 @@ module "k8s-portal" {
k8s_ca_cert = var.k8s_ca_cert
}
# -----------------------------------------------------------------------------
# CrowdSec — Security/WAF
# -----------------------------------------------------------------------------
module "crowdsec" {
source = "./modules/crowdsec"
tier = local.tiers.cluster
tls_secret_name = var.tls_secret_name
mysql_host = var.mysql_host
homepage_username = local.homepage_credentials["crowdsec"]["username"]
homepage_password = local.homepage_credentials["crowdsec"]["password"]
enroll_key = data.vault_kv_secret_v2.secrets.data["crowdsec_enroll_key"]
db_password = data.vault_kv_secret_v2.secrets.data["crowdsec_db_password"]
crowdsec_dash_api_key = data.vault_kv_secret_v2.secrets.data["crowdsec_dash_api_key"]
crowdsec_dash_machine_id = data.vault_kv_secret_v2.secrets.data["crowdsec_dash_machine_id"]
crowdsec_dash_machine_password = data.vault_kv_secret_v2.secrets.data["crowdsec_dash_machine_password"]
slack_webhook_url = data.vault_kv_secret_v2.secrets.data["alertmanager_slack_api_url"]
}
# -----------------------------------------------------------------------------
# Monitoring — Prometheus / Grafana / Loki stack
# -----------------------------------------------------------------------------