From 10fd88aec5fa4d2928c8c0ccc71eb003903e38e6 Mon Sep 17 00:00:00 2001
From: Viktor Barzin
Date: Sat, 18 Apr 2026 22:25:19 +0000
Subject: [PATCH] =?UTF-8?q?wealthfolio:=20add=20nightly=20backup=20sidecar?=
 =?UTF-8?q?=20=E2=80=94=20SQLite=20=E2=86=92=20NFS?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Context

Upstream Wealthfolio uses SQLite exclusively (Diesel ORM, no PG/MySQL
support — confirmed 2026-04-18 via repo inspection). The DB lives on an
RWO PVC (proxmox-lvm-encrypted) held 24/7 by the main pod.

The first attempt, a standalone backup CronJob, failed with a
Multi-Attach error: an RWO volume already attached to the running WF pod
cannot be mounted by a second pod. Switched to a backup sidecar in the
same pod, which shares the PVC mount naturally.

## This change

- `container "backup"` added to the WF Deployment:
  - alpine:3.20 + sqlite + busybox-suid (for crond).
  - Mounts /data read-only (shared with the WF container) and /backup (a
    new NFS volume at 192.168.1.127:/srv/nfs/wealthfolio-backup).
  - Writes /etc/crontabs/root with a `30 4 * * *` entry and
    /scripts/backup.sh, which runs `sqlite3 .backup` (WAL-safe online
    snapshot, zero downtime), copies secrets.json, and prunes anything
    older than 30 days.
  - 16Mi request / 64Mi limit — it sleeps most of the time.
- NFS volume declared in the pod spec — server comes from the existing
  `var.nfs_server` variable; the path /srv/nfs/wealthfolio-backup was
  created on the PVE host in the same session.
- Removed the standalone backup CronJob that couldn't work (it only ever
  existed as a test apply, never in git, hence the destroy below and no
  deletions in this diff).

## Verification

### Automated

`scripts/tg apply stacks/wealthfolio` →
Apply complete! Resources: 0 added, 1 changed, 1 destroyed (the
transient CronJob).

### Manual (2026-04-18)

$ kubectl -n wealthfolio get pods -l app=wealthfolio
wealthfolio-95d8bd498-cj8kw   2/2   Running

$ kubectl -n wealthfolio logs wealthfolio-95d8bd498-cj8kw -c backup
wealthfolio-backup sidecar ready; next 04:30 UTC

$ kubectl -n wealthfolio exec wealthfolio-95d8bd498-cj8kw -c backup -- /scripts/backup.sh
wealthfolio-backup: /backup/2026-04-18T22-24-55 (34.2M)

$ ssh root@192.168.1.127 ls /srv/nfs/wealthfolio-backup/
2026-04-18T22-24-55/    ← first sidecar-produced backup

## Reproduce locally

1. kubectl -n wealthfolio exec $(kubectl -n wealthfolio get pods -l app=wealthfolio -o jsonpath='{.items[0].metadata.name}') -c backup -- /scripts/backup.sh
2. ssh root@192.168.1.127 ls /srv/nfs/wealthfolio-backup/
3. Expected: a new dated folder appears with wealthfolio.db + secrets.json.
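4. Optional integrity check of the snapshot (a sketch: assumes `sqlite3`
   is installed on the PVE host, which this change does not itself set
   up; `<dated-folder>` stands for the folder from step 2):

   ssh root@192.168.1.127 "sqlite3 /srv/nfs/wealthfolio-backup/<dated-folder>/wealthfolio.db 'PRAGMA integrity_check;'"

   Expected: `ok` for an intact snapshot.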
Co-Authored-By: Claude Opus 4.7 (1M context)
---
 stacks/wealthfolio/main.tf | 71 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)

diff --git a/stacks/wealthfolio/main.tf b/stacks/wealthfolio/main.tf
index d223d024..a469e9b3 100644
--- a/stacks/wealthfolio/main.tf
+++ b/stacks/wealthfolio/main.tf
@@ -168,12 +168,65 @@ resource "kubernetes_deployment" "wealthfolio" {
           }
         }
       }
+
+      # Backup sidecar — see the big comment further down. Shares the WF
+      # data PVC (read-only) + the NFS backup target. busybox crond fires
+      # a nightly sqlite3 .backup so we have an off-cluster copy.
+      container {
+        name    = "backup"
+        image   = "alpine:3.20"
+        command = ["/bin/sh", "-c", <<-EOT
+          set -eu
+          apk add --no-cache --quiet sqlite busybox-suid
+          mkdir -p /etc/crontabs
+          cat >/etc/crontabs/root <<'CRON'
+          30 4 * * * /scripts/backup.sh >>/proc/1/fd/1 2>&1
+          CRON
+          mkdir -p /scripts
+          cat >/scripts/backup.sh <<'SCRIPT'
+          #!/bin/sh
+          set -eu
+          TS=$(date +%Y-%m-%dT%H-%M-%S)
+          DIR=/backup/$TS
+          mkdir -p "$DIR"
+          sqlite3 /data/wealthfolio.db ".backup $DIR/wealthfolio.db"
+          cp /data/secrets.json "$DIR/" 2>/dev/null || true
+          # Retention — keep 30 days.
+          find /backup -mindepth 1 -maxdepth 1 -type d -mtime +30 -exec rm -rf {} +
+          echo "wealthfolio-backup: $DIR ($(du -sh "$DIR" | cut -f1))"
+          SCRIPT
+          chmod +x /scripts/backup.sh
+          echo "wealthfolio-backup sidecar ready; next 04:30 UTC"
+          exec crond -f -l 8
+        EOT
+        ]
+        volume_mount {
+          name       = "data"
+          mount_path = "/data"
+          read_only  = true
+        }
+        volume_mount {
+          name       = "backup"
+          mount_path = "/backup"
+        }
+        resources {
+          requests = { cpu = "5m", memory = "16Mi" }
+          limits   = { memory = "64Mi" }
+        }
+      }
       volume {
         name = "data"
         persistent_volume_claim {
           claim_name = "wealthfolio-data-encrypted"
         }
       }
+      volume {
+        name = "backup"
+        nfs {
+          server = var.nfs_server
+          path   = "/srv/nfs/wealthfolio-backup"
+        }
+      }
     }
   }
 }
@@ -320,3 +373,21 @@ resource "kubernetes_cron_job_v1" "wealthfolio_sync" {
     ignore_changes = [spec[0].job_template[0].spec[0].template[0].spec[0].dns_config]
   }
 }
+
+############################################################################
+# Backup — sidecar approach
+#
+# Wealthfolio has no PG/MySQL support (Diesel ORM hard-wired to SQLite per
+# upstream README). The data lives on an RWO PVC that's held 24/7 by the
+# main WF pod, so a separate backup CronJob would hit a Multi-Attach error
+# (confirmed by a 2026-04-18 test).
+#
+# Instead, the WF Deployment gets a backup sidecar:
+#   - Shares the data PVC read-only + the NFS backup target.
+#   - Runs busybox `crond` with a 04:30-daily entry.
+#   - Uses `sqlite3 .backup` (WAL-safe, no downtime) to snapshot into a
+#     dated NFS folder + retains 30 days.
+#
+# See `resource "kubernetes_deployment" "wealthfolio"` above — the sidecar
+# is wired in via the deployment's container/volume blocks.
+############################################################################
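--
Restore path (a sketch, not wired up by this change): pull a snapshot off
the NFS host and push it into the pod's writable /data through the main
container. It assumes the main container is named `wealthfolio` (its
definition sits outside these hunks), that its image ships `tar` (which
`kubectl cp` depends on), and that WF is idle while the file is replaced;
`<dated-folder>` is one of the NFS snapshots:

  POD=$(kubectl -n wealthfolio get pods -l app=wealthfolio -o jsonpath='{.items[0].metadata.name}')
  scp root@192.168.1.127:/srv/nfs/wealthfolio-backup/<dated-folder>/wealthfolio.db /tmp/wealthfolio.db
  kubectl -n wealthfolio cp /tmp/wealthfolio.db "$POD":/data/wealthfolio.db -c wealthfolio
  kubectl -n wealthfolio rollout restart deploy/wealthfolio   # reopen the restored DB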