fix: technitium CronJob scheduling, LUKS backup support, speedtest scrape

- technitium-password-sync: remove RWO encrypted PVC mount that caused
  pods to stick in ContainerCreating on wrong nodes. Plugin install now
  warns instead of failing when zip unavailable.
- daily-backup: add LUKS decryption support for encrypted PVC snapshots
  using /root/.luks-backup-key. Uses noload mount option to skip ext4
  journal replay. Also installed cryptsetup-bin on PVE host.
- speedtest: disable prometheus.io/scrape annotation (no /prometheus
  endpoint exists, causing ScrapeTargetDown alert).

[ci skip]

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Viktor Barzin 2026-04-15 15:12:32 +00:00
parent 25ef5176bb
commit 9baefa22ab
3 changed files with 27 additions and 19 deletions

View file

@@ -145,8 +145,26 @@ else
continue
fi
# Detect LUKS-encrypted volumes and set up mount device
LUKS_NAME=""
MOUNT_DEV="/dev/pve/${snap}"
MOUNT_OPTS="ro"
if blkid -o value -s TYPE "/dev/pve/${snap}" 2>/dev/null | grep -q 'crypto_LUKS'; then
LUKS_KEY="/root/.luks-backup-key"
LUKS_NAME="pvc-snap-$(echo "${snap}" | md5sum | cut -c1-12)"
if [ -f "${LUKS_KEY}" ] && cryptsetup open --type luks --key-file "${LUKS_KEY}" --readonly "/dev/pve/${snap}" "${LUKS_NAME}" 2>&1; then
MOUNT_DEV="/dev/mapper/${LUKS_NAME}"
MOUNT_OPTS="ro,noload" # noload skips ext4 journal replay on read-only LUKS
log " LUKS: decrypted ${snap} -> ${LUKS_NAME}"
else
warn "Failed to decrypt LUKS snapshot ${snap}"
PVC_FAIL=$((PVC_FAIL + 1))
continue
fi
fi
# Mount snapshot read-only, rsync files
if timeout 30 mount -o ro "/dev/pve/${snap}" "${PVC_MOUNT}" 2>&1; then
if timeout 30 mount -o "${MOUNT_OPTS}" "${MOUNT_DEV}" "${PVC_MOUNT}" 2>&1; then
dst="${BACKUP_ROOT}/pvc-data/${WEEK}/${ns_pvc}"
mkdir -p "${dst}"
if rsync -az --delete \
@@ -182,6 +200,11 @@ else
warn "Failed to mount snapshot ${snap}"
PVC_FAIL=$((PVC_FAIL + 1))
fi
# Close LUKS device if we opened one
if [ -n "${LUKS_NAME}" ]; then
cryptsetup close "${LUKS_NAME}" 2>/dev/null || true
fi
done
log " PVC copy: ${PVC_COUNT} OK, ${PVC_FAIL} failed"

View file

@@ -206,9 +206,7 @@ resource "kubernetes_service" "speedtest" {
"app" = "speedtest"
}
annotations = {
"prometheus.io/scrape" = "true"
"prometheus.io/path" = "/prometheus"
"prometheus.io/port" = "80"
"prometheus.io/scrape" = "false"
}
}

View file

@@ -470,11 +470,6 @@ resource "kubernetes_cron_job_v1" "technitium_password_sync" {
name = "TECH_PASS"
value = var.technitium_password
}
volume_mount {
name = "technitium-data"
mount_path = "/etc/dns"
read_only = true
}
command = ["/bin/sh", "-c", <<-EOT
set -e
TOKEN=$$(curl -sf "http://technitium-web:5380/api/user/login?user=$$TECH_USER&pass=$$TECH_PASS" | grep -o '"token":"[^"]*"' | cut -d'"' -f4)
@@ -485,12 +480,10 @@ resource "kubernetes_cron_job_v1" "technitium_password_sync" {
curl -sf -X POST "http://technitium-web:5380/api/apps/config/set?token=$$TOKEN" --data-urlencode "name=Query Logs (MySQL)" --data-urlencode "config=$$MYSQL_CONFIG"
echo "MySQL logging disabled"
# Install PG plugin if not already loaded (survives restarts via NFS, but not upgrades)
# Check PG plugin is loaded (installed persistently in Technitium data dir)
PG_LOADED=$$(curl -sf "http://technitium-web:5380/api/apps/list?token=$$TOKEN" | grep -c 'QueryLogsPostgres.App' || true)
if [ "$$PG_LOADED" = "0" ]; then
echo "PG plugin not loaded, installing from NFS..."
curl -sf -X POST "http://technitium-web:5380/api/apps/install?token=$$TOKEN&name=Query%20Logs%20(Postgres)" -F "fileData=@/etc/dns/QueryLogsPostgresApp.zip"
echo "PG plugin installed"
echo "WARNING: PG plugin not loaded — reinstall manually via Technitium UI"
fi
# Configure PG query logging
@@ -500,12 +493,6 @@ resource "kubernetes_cron_job_v1" "technitium_password_sync" {
EOT
]
}
volume {
name = "technitium-data"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim.primary_config_encrypted.metadata[0].name
}
}
restart_policy = "OnFailure"
}
}