Compare commits
6 commits
master
...
broker-syn
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
731de63150 | ||
|
|
9ce9a9a7f7 | ||
|
|
277babc696 | ||
|
|
d91fbd4a60 | ||
|
|
e81e836d3a | ||
|
|
d3be9b50af |
10 changed files with 630 additions and 443 deletions
1
.gitattributes
vendored
1
.gitattributes
vendored
|
|
@ -3,3 +3,4 @@
|
|||
*.tfstate filter=git-crypt diff=git-crypt
|
||||
*.tfvars filter=git-crypt diff=git-crypt
|
||||
secrets/** filter=git-crypt diff=git-crypt
|
||||
stacks/**/secrets/** filter=git-crypt diff=git-crypt
|
||||
|
|
|
|||
|
|
@ -1,229 +0,0 @@
|
|||
mqtt:
|
||||
enabled: false
|
||||
birdseye:
|
||||
quality: 25
|
||||
detect:
|
||||
fps: 1
|
||||
enabled: true
|
||||
go2rtc:
|
||||
streams:
|
||||
vermont-1:
|
||||
- rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/101/3
|
||||
cameras:
|
||||
# # Temp disabled until valchedrym is back up
|
||||
valchedrym-cam-1:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
#- path: rtsp://admin:REDACTED_RTSP_PW@192.168.0.11:554/Streaming/Channels/101 # <----- The stream you want to use for detection
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@valchedrym.ddns.net:554/Streaming/Channels/101 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
objects:
|
||||
# Optional: list of objects to track from labelmap.txt (full list - https://docs.frigate.video/configuration/objects)
|
||||
track:
|
||||
- person
|
||||
- bicycle
|
||||
- car
|
||||
- bird
|
||||
- cat
|
||||
- dog
|
||||
- horse
|
||||
valchedrym-cam-2:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
#- path: rtsp://admin:REDACTED_RTSP_PW@192.168.0.11:554/Streaming/Channels/201 # <----- The stream you want to use for detection
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@valchedrym.ddns.net:554/Streaming/Channels/201 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
objects:
|
||||
# Optional: list of objects to track from labelmap.txt (full list - https://docs.frigate.video/configuration/objects)
|
||||
track:
|
||||
- person
|
||||
- bicycle
|
||||
- car
|
||||
- bird
|
||||
- cat
|
||||
- dog
|
||||
- horse
|
||||
vermont-1:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/101/3 # <----- The stream you want to use for detection
|
||||
roles:
|
||||
- record
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
detect:
|
||||
enabled: false
|
||||
vermont-2:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/201/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-3:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/301/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-4:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/401/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-5:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/501/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-6:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/601/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-7:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/701/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-8:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/801/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
vermont-9:
|
||||
enabled: true
|
||||
ffmpeg:
|
||||
inputs:
|
||||
- path: rtsp://admin:REDACTED_RTSP_PW@192.168.1.10:554/Streaming/Channels/901/1 # <----- The stream you want to use for detection
|
||||
detect:
|
||||
enabled: false # <---- disable detection until you have a working camera feed
|
||||
width: 704 # <---- update for your camera's resolution
|
||||
height: 576 # <---- update for your camera's resolution
|
||||
rtmp:
|
||||
enabled: false
|
||||
record:
|
||||
enabled: false
|
||||
snapshots:
|
||||
enabled: false
|
||||
# london-ipcam:
|
||||
# enabled: false
|
||||
# ffmpeg:
|
||||
# inputs:
|
||||
# - path: rtsp://192.168.2.2:8554/london_cam # <----- The stream you want to use for detection
|
||||
# roles:
|
||||
# - rtmp
|
||||
# - record
|
||||
# - detect
|
||||
# detect:
|
||||
# enabled: False
|
||||
# width: 1280
|
||||
# height: 720
|
||||
# record:
|
||||
# enabled: False # Not needed for this camera but keeping for reference
|
||||
# events:
|
||||
# retain:
|
||||
# default: 10
|
||||
# objects:
|
||||
# # Optional: list of objects to track from labelmap.txt (full list - https://docs.frigate.video/configuration/objects)
|
||||
# track:
|
||||
# - person
|
||||
# - shoe
|
||||
# - handbag
|
||||
# - wine glass
|
||||
# - knife
|
||||
# - pizza
|
||||
# - laptop
|
||||
# - book
|
||||
|
|
@ -1,136 +0,0 @@
|
|||
#!/usr/bin/expect -f
|
||||
|
||||
set timeout -1
|
||||
set le_dir "/tmp/le/"
|
||||
set config_dir "$le_dir/out/config"
|
||||
set pwd [pwd]
|
||||
set technitium_token "REDACTED_TECHNITIUM_TOKEN"
|
||||
|
||||
spawn certbot certonly --manual --preferred-challenge=dns --email me@viktorbarzin.me --server https://acme-v02.api.letsencrypt.org/directory --agree-tos -d *.viktorbarzin.me -d viktorbarzin.me --config-dir $config_dir --work-dir $le_dir/workdir --logs-dir $le_dir/logsdir --no-eff-email
|
||||
|
||||
# Create challenge TXT record
|
||||
curl "http://technitium-web.technitium.svc.cluster.local:5380/api/zones/records/add?token=$API_TOKEN&domain=_acme-challenge.\$CERTBOT_DOMAIN&type=TXT&ttl=60&text=\$CERTBOT_VALIDATION"
|
||||
|
||||
# Sleep to make sure the change has time to propagate from primary to secondary name servers
|
||||
sleep 25
|
||||
}
|
||||
spawn /bin/sh
|
||||
send "echo \"$auth_contents\" > /root/certbot-auth.sh \r"
|
||||
send "chmod 700 /root/certbot-auth.sh \r"
|
||||
send "cat /root/certbot-auth.sh \r"
|
||||
send "exit \r"
|
||||
|
||||
# Contents for certbot-cleanup
|
||||
set cleanup_contents {#!/usr/bin/env sh
|
||||
exit 0 # DEBUG: TODO: Remove me
|
||||
# Generate API token from DNS web console
|
||||
API_TOKEN="REDACTED_TECHNITIUM_TOKEN"
|
||||
|
||||
# Delete challenge TXT record
|
||||
curl "http://technitium-web.technitium.svc.cluster.local:5380/api/zones/records/delete?token=$API_TOKEN&domain=_acme-challenge.\$CERTBOT_DOMAIN&type=TXT&text=\$CERTBOT_VALIDATION"
|
||||
}
|
||||
spawn /bin/sh
|
||||
send "echo \"$cleanup_contents\" > /root/certbot-cleanup.sh \r"
|
||||
send "chmod 700 /root/certbot-cleanup.sh \r"
|
||||
send "exit \r"
|
||||
|
||||
# Force deployment recreation
|
||||
# exec terraform taint module.kubernetes_cluster.module.bind.module.bind-public-deployment.kubernetes_deployment.bind
|
||||
exec terraform taint module.kubernetes_cluster.module.technitium.kubernetes_deployment.technitium
|
||||
# set current_time [clock seconds]
|
||||
# set formatted_time [clock format $current_time -format "+%Y-%m-%dT%TZ"]
|
||||
# exec curl -X PATCH https://10.0.20.100:6443/apis/apps/v1/namespaces/technitium/deployments/technitium -H \"Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" -H \"Content-Type:application/strategic-merge-patch+json\" -k -d '{\"spec\": {\"template\": {\"metadata\": { \"annotations\": {\"kubectl.kubernetes.io/restartedAt\": \"'$(date +%Y-%m-%dT%TZ)'\" }}}}}'
|
||||
# exec curl -X PATCH https://10.0.20.100:6443/apis/apps/v1/namespaces/technitium/deployments/technitium -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type: application/strategic-merge-patch+json" -k -d "{\"spec\": {\"template\": {\"metadata\": { \"annotations\": {\"kubectl.kubernetes.io/restartedAt\": \"$formatted_time\" }}}}}"
|
||||
# exec terraform taint module.kubernetes_cluster.module.technitium.module.technitium.kubernetes_deployment.technitium
|
||||
# Apply changes to configmap and redeploy
|
||||
exec >@stdout 2>@stderr terraform apply -auto-approve -target=module.kubernetes_cluster.module.technitium
|
||||
|
||||
# Wait for deployment update
|
||||
# TODO: better to use k8s api. What we want is `kubectl rollout status deployment -l app=bind-public` as a curl
|
||||
# exec bash -c 'while [[ $(kubectl get pods -l app=bind-public -o \'jsonpath={..status.conditions[\?(\@.type=="Ready")].status}\') != "True" ]]; do echo "waiting pod..." && sleep 1; done'
|
||||
exec >@stdout echo 'Waiting for redeployment of technitium...'
|
||||
exec sleep 10
|
||||
|
||||
# spawn certbot certonly --manual --preferred-challenge=dns --email me@viktorbarzin.me --server https://acme-v02.api.letsencrypt.org/directory --agree-tos -d *.viktorbarzin.me -d viktorbarzin.me --config-dir $config_dir --work-dir $le_dir/workdir --logs-dir $le_dir/logsdir --no-eff-email
|
||||
|
||||
# set prompt "$"
|
||||
# set dns_file "$pwd/modules/kubernetes/bind/extra/viktorbarzin.me"
|
||||
# # expect -re "Please deploy a DNS TXT record under the name" {
|
||||
# expect -re "Press Enter to Continue" {
|
||||
# set challenge [ exec sh -c "echo '$expect_out(buffer)' | tail -n 4 | head -n 1" ]
|
||||
# set dns_record "_acme-challenge IN TXT \"$challenge\""
|
||||
# puts "\nChallenge: '$challenge'"
|
||||
# # send \x03
|
||||
# puts "Dns file: '$dns_file'"
|
||||
|
||||
# # Check if dns record is not already present
|
||||
# try {
|
||||
# set results [exec grep -q $dns_record $dns_file]
|
||||
# set status 0
|
||||
# } trap CHILDSTATUS {results options} {
|
||||
# set status [lindex [dict get $options -errorcode] 2]
|
||||
# }
|
||||
# if {$status != 0} {
|
||||
# exec echo $dns_record | tee -a $dns_file
|
||||
# puts "Teed into file"
|
||||
# } else {
|
||||
# puts "DNS record '$dns_record' already in file"
|
||||
# }
|
||||
# }
|
||||
|
||||
# send -- "\r"
|
||||
# # Do the same for the 2nd dns record
|
||||
# expect -re "\[a-zA-Z0-9_-\]{43}" {
|
||||
# set challenge $expect_out(0,string)
|
||||
# # set challenge [ exec sh -c "echo $expect_out(0, buffer) | tail -n 8 | head -n 1" ]
|
||||
# set dns_record1 "_acme-challenge IN TXT \"$challenge\""
|
||||
# puts "Challenge: '$challenge'"
|
||||
# puts "Dns record: '$dns_record1'"
|
||||
# puts "Dns file: '$dns_file'"
|
||||
|
||||
# # Check if dns record is not already present
|
||||
# try {
|
||||
# set results [exec grep -q $dns_record1 $dns_file]
|
||||
# set status 0
|
||||
# } trap CHILDSTATUS {results options} {
|
||||
# set status [lindex [dict get $options -errorcode] 2]
|
||||
# }
|
||||
# if {$status != 0} {
|
||||
# exec echo $dns_record1 | tee -a $dns_file
|
||||
# puts "Teed into file"
|
||||
# } else {
|
||||
# puts "DNS record '$dns_record1' already in file"
|
||||
# }
|
||||
# }
|
||||
|
||||
# # Force deployment recreation
|
||||
# # exec terraform taint module.kubernetes_cluster.module.bind.module.bind-public-deployment.kubernetes_deployment.bind
|
||||
# exec terraform taint module.kubernetes_cluster.module.technitium.kubernetes_deployment.technitium
|
||||
# # set current_time [clock seconds]
|
||||
# # set formatted_time [clock format $current_time -format "+%Y-%m-%dT%TZ"]
|
||||
# # exec curl -X PATCH https://10.0.20.100:6443/apis/apps/v1/namespaces/technitium/deployments/technitium -H \"Authorization:Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" -H \"Content-Type:application/strategic-merge-patch+json\" -k -d '{\"spec\": {\"template\": {\"metadata\": { \"annotations\": {\"kubectl.kubernetes.io/restartedAt\": \"'$(date +%Y-%m-%dT%TZ)'\" }}}}}'
|
||||
# # exec curl -X PATCH https://10.0.20.100:6443/apis/apps/v1/namespaces/technitium/deployments/technitium -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -H "Content-Type: application/strategic-merge-patch+json" -k -d "{\"spec\": {\"template\": {\"metadata\": { \"annotations\": {\"kubectl.kubernetes.io/restartedAt\": \"$formatted_time\" }}}}}"
|
||||
# # exec terraform taint module.kubernetes_cluster.module.technitium.module.technitium.kubernetes_deployment.technitium
|
||||
# # Apply changes to configmap and redeploy
|
||||
# exec >@stdout 2>@stderr terraform apply -auto-approve -target=module.kubernetes_cluster.module.technitium
|
||||
|
||||
# # Wait for deployment update
|
||||
# # TODO: better to use k8s api. What we want is `kubectl rollout status deployment -l app=bind-public` as a curl
|
||||
# # exec bash -c 'while [[ $(kubectl get pods -l app=bind-public -o \'jsonpath={..status.conditions[\?(\@.type=="Ready")].status}\') != "True" ]]; do echo "waiting pod..." && sleep 1; done'
|
||||
# exec >@stdout echo 'Waiting for redeployment of technitium...'
|
||||
# exec sleep 10
|
||||
|
||||
# send -- "\r"
|
||||
|
||||
# # Clean up
|
||||
# exec sed -i "s/$dns_record//g" "$dns_file"
|
||||
# exec sed -i "s/$dns_record1//g" "$dns_file"
|
||||
|
||||
# Success
|
||||
expect ".*Congratulations!"
|
||||
|
||||
# Copy cert and key to secrets dir
|
||||
exec cp --remove-destination $config_dir/live/viktorbarzin.me/fullchain.pem ./secrets
|
||||
exec cp --remove-destination $config_dir/live/viktorbarzin.me/privkey.pem ./secrets
|
||||
|
||||
puts "Done renewing cert. Output certificates stored in ./secrets\n"
|
||||
|
|
@ -386,12 +386,13 @@ module "tls_secret" {
|
|||
}
|
||||
|
||||
module "ingress" {
|
||||
source = "../../modules/kubernetes/ingress_factory"
|
||||
dns_type = "proxied"
|
||||
namespace = kubernetes_namespace.beads.metadata[0].name
|
||||
name = "dolt-workbench"
|
||||
tls_secret_name = var.tls_secret_name
|
||||
protected = true
|
||||
source = "../../modules/kubernetes/ingress_factory"
|
||||
dns_type = "proxied"
|
||||
namespace = kubernetes_namespace.beads.metadata[0].name
|
||||
name = "dolt-workbench"
|
||||
tls_secret_name = var.tls_secret_name
|
||||
protected = false
|
||||
exclude_crowdsec = true
|
||||
extra_annotations = {
|
||||
"gethomepage.dev/enabled" = "true"
|
||||
"gethomepage.dev/name" = "Dolt Workbench"
|
||||
|
|
@ -595,12 +596,13 @@ resource "kubernetes_service" "beadboard" {
|
|||
}
|
||||
|
||||
module "beadboard_ingress" {
|
||||
source = "../../modules/kubernetes/ingress_factory"
|
||||
dns_type = "proxied"
|
||||
namespace = kubernetes_namespace.beads.metadata[0].name
|
||||
name = "beadboard"
|
||||
tls_secret_name = var.tls_secret_name
|
||||
protected = true
|
||||
source = "../../modules/kubernetes/ingress_factory"
|
||||
dns_type = "proxied"
|
||||
namespace = kubernetes_namespace.beads.metadata[0].name
|
||||
name = "beadboard"
|
||||
tls_secret_name = var.tls_secret_name
|
||||
protected = true
|
||||
exclude_crowdsec = true
|
||||
extra_annotations = {
|
||||
"gethomepage.dev/enabled" = "true"
|
||||
"gethomepage.dev/name" = "BeadBoard"
|
||||
|
|
|
|||
599
stacks/broker-sync/main.tf
Normal file
599
stacks/broker-sync/main.tf
Normal file
|
|
@ -0,0 +1,599 @@
|
|||
variable "nfs_server" { type = string }
|
||||
|
||||
variable "image_tag" {
|
||||
type = string
|
||||
default = "latest"
|
||||
description = "broker-sync image tag. Use 8-char git SHA in CI; :latest only for local trials."
|
||||
}
|
||||
|
||||
resource "kubernetes_namespace" "broker_sync" {
|
||||
metadata {
|
||||
name = "broker-sync"
|
||||
labels = {
|
||||
"istio-injection" = "disabled"
|
||||
tier = local.tiers.aux
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Secrets for all providers. Seeded in Vault at `secret/broker-sync`:
|
||||
# wf_base_url — e.g. https://wealthfolio.viktorbarzin.me
|
||||
# wf_username — Wealthfolio login username
|
||||
# wf_password — Wealthfolio login password (cleartext; server stores Argon2id)
|
||||
# trading212_api_keys — JSON array of {account_id, account_type, api_key, name, currency}
|
||||
# imap_host, imap_user, imap_password, imap_directory — for InvestEngine + Schwab email ingest
|
||||
resource "kubernetes_manifest" "external_secret" {
|
||||
manifest = {
|
||||
apiVersion = "external-secrets.io/v1beta1"
|
||||
kind = "ExternalSecret"
|
||||
metadata = {
|
||||
name = "broker-sync-secrets"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
}
|
||||
spec = {
|
||||
refreshInterval = "15m"
|
||||
secretStoreRef = {
|
||||
name = "vault-kv"
|
||||
kind = "ClusterSecretStore"
|
||||
}
|
||||
target = {
|
||||
name = "broker-sync-secrets"
|
||||
}
|
||||
dataFrom = [{
|
||||
extract = {
|
||||
key = "broker-sync"
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
depends_on = [kubernetes_namespace.broker_sync]
|
||||
}
|
||||
|
||||
# Canonical data dir — SQLite watermarks, FX cache, CSV drop/archive, Wealthfolio session cache.
|
||||
# Encrypted because we're storing brokerage tokens, session cookies, and transaction history.
|
||||
resource "kubernetes_persistent_volume_claim" "data_encrypted" {
|
||||
wait_until_bound = false
|
||||
metadata {
|
||||
name = "broker-sync-data-encrypted"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
annotations = {
|
||||
"resize.topolvm.io/threshold" = "80%"
|
||||
"resize.topolvm.io/increase" = "100%"
|
||||
"resize.topolvm.io/storage_limit" = "5Gi"
|
||||
}
|
||||
}
|
||||
spec {
|
||||
access_modes = ["ReadWriteOnce"]
|
||||
storage_class_name = "proxmox-lvm-encrypted"
|
||||
resources {
|
||||
requests = { storage = "1Gi" }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
broker_sync_image = "viktorbarzin/broker-sync:${var.image_tag}"
|
||||
|
||||
# Shared env block for every CronJob: auth into Wealthfolio + data path.
|
||||
common_env = [
|
||||
{ name = "BROKER_SYNC_DATA_DIR", value = "/data", from = null },
|
||||
{ name = "WF_SESSION_PATH", value = "/data/wealthfolio_session.json", from = null },
|
||||
{ name = "WF_BASE_URL", value = null, from = "wf_base_url" },
|
||||
{ name = "WF_USERNAME", value = null, from = "wf_username" },
|
||||
{ name = "WF_PASSWORD", value = null, from = "wf_password" },
|
||||
]
|
||||
}
|
||||
|
||||
# Phase 0 liveness: proves the image + namespace + PVC + ESO wiring end-to-end.
|
||||
# Suspended by default; toggle to false to run.
|
||||
resource "kubernetes_cron_job_v1" "version_probe" {
|
||||
metadata {
|
||||
name = "broker-sync-version"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
labels = { app = "broker-sync", component = "version-probe" }
|
||||
}
|
||||
spec {
|
||||
schedule = "0 1 * * *"
|
||||
concurrency_policy = "Forbid"
|
||||
successful_jobs_history_limit = 1
|
||||
failed_jobs_history_limit = 3
|
||||
job_template {
|
||||
metadata {}
|
||||
spec {
|
||||
backoff_limit = 1
|
||||
ttl_seconds_after_finished = 300
|
||||
template {
|
||||
metadata {
|
||||
labels = { app = "broker-sync", component = "version-probe" }
|
||||
}
|
||||
spec {
|
||||
restart_policy = "OnFailure"
|
||||
container {
|
||||
name = "broker-sync"
|
||||
image = local.broker_sync_image
|
||||
command = ["broker-sync", "version"]
|
||||
resources {
|
||||
requests = { cpu = "10m", memory = "32Mi" }
|
||||
limits = { memory = "128Mi" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Trading212 steady-state daily sync. Phase 1 deliverable.
|
||||
resource "kubernetes_cron_job_v1" "trading212" {
|
||||
metadata {
|
||||
name = "broker-sync-trading212"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
labels = { app = "broker-sync", component = "trading212" }
|
||||
}
|
||||
spec {
|
||||
schedule = "0 2 * * *" # 02:00 UK
|
||||
concurrency_policy = "Forbid"
|
||||
starting_deadline_seconds = 300
|
||||
successful_jobs_history_limit = 3
|
||||
failed_jobs_history_limit = 5
|
||||
job_template {
|
||||
metadata {}
|
||||
spec {
|
||||
backoff_limit = 2
|
||||
ttl_seconds_after_finished = 86400
|
||||
template {
|
||||
metadata {
|
||||
labels = { app = "broker-sync", component = "trading212" }
|
||||
}
|
||||
spec {
|
||||
restart_policy = "OnFailure"
|
||||
container {
|
||||
name = "broker-sync"
|
||||
image = local.broker_sync_image
|
||||
command = ["broker-sync", "trading212", "--mode", "steady"]
|
||||
|
||||
env {
|
||||
name = "BROKER_SYNC_DATA_DIR"
|
||||
value = "/data"
|
||||
}
|
||||
env {
|
||||
name = "WF_SESSION_PATH"
|
||||
value = "/data/wealthfolio_session.json"
|
||||
}
|
||||
env {
|
||||
name = "WF_BASE_URL"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_base_url"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_USERNAME"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_username"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_PASSWORD"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_password"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "T212_API_KEYS_JSON"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "trading212_api_keys"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
name = "data"
|
||||
mount_path = "/data"
|
||||
}
|
||||
resources {
|
||||
requests = { cpu = "20m", memory = "128Mi" }
|
||||
limits = { memory = "256Mi" }
|
||||
}
|
||||
}
|
||||
volume {
|
||||
name = "data"
|
||||
persistent_volume_claim {
|
||||
claim_name = kubernetes_persistent_volume_claim.data_encrypted.metadata[0].name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# IMAP ingest — InvestEngine + Schwab email parsers, one combined pod.
|
||||
# Phase 2 deliverable. Defined ahead of implementation so the rollout is
|
||||
# one `tf apply` once the image supports the CLI subcommand.
|
||||
resource "kubernetes_cron_job_v1" "imap" {
|
||||
metadata {
|
||||
name = "broker-sync-imap"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
labels = { app = "broker-sync", component = "imap" }
|
||||
}
|
||||
spec {
|
||||
schedule = "30 2 * * *" # 02:30 UK, 30min after T212
|
||||
concurrency_policy = "Forbid"
|
||||
successful_jobs_history_limit = 3
|
||||
failed_jobs_history_limit = 5
|
||||
suspend = true # enable in Phase 2
|
||||
job_template {
|
||||
metadata {}
|
||||
spec {
|
||||
backoff_limit = 2
|
||||
ttl_seconds_after_finished = 86400
|
||||
template {
|
||||
metadata {
|
||||
labels = { app = "broker-sync", component = "imap" }
|
||||
}
|
||||
spec {
|
||||
restart_policy = "OnFailure"
|
||||
container {
|
||||
name = "broker-sync"
|
||||
image = local.broker_sync_image
|
||||
command = ["broker-sync", "imap"]
|
||||
|
||||
env {
|
||||
name = "BROKER_SYNC_DATA_DIR"
|
||||
value = "/data"
|
||||
}
|
||||
env {
|
||||
name = "WF_SESSION_PATH"
|
||||
value = "/data/wealthfolio_session.json"
|
||||
}
|
||||
env {
|
||||
name = "WF_BASE_URL"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_base_url"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_USERNAME"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_username"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_PASSWORD"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_password"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "IMAP_HOST"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "imap_host"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "IMAP_USER"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "imap_user"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "IMAP_PASSWORD"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "imap_password"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "IMAP_DIRECTORY"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "imap_directory"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
name = "data"
|
||||
mount_path = "/data"
|
||||
}
|
||||
resources {
|
||||
requests = { cpu = "10m", memory = "64Mi" }
|
||||
limits = { memory = "256Mi" }
|
||||
}
|
||||
}
|
||||
volume {
|
||||
name = "data"
|
||||
persistent_volume_claim {
|
||||
claim_name = kubernetes_persistent_volume_claim.data_encrypted.metadata[0].name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# CSV drop-folder processor — Scottish Widows, Fidelity quarterly, Freetrade, etc.
|
||||
# Phase 3 deliverable. Suspended until CLI subcommand lands.
|
||||
resource "kubernetes_cron_job_v1" "csv_drop" {
|
||||
metadata {
|
||||
name = "broker-sync-csv"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
labels = { app = "broker-sync", component = "csv" }
|
||||
}
|
||||
spec {
|
||||
schedule = "0 3 * * *" # 03:00 UK
|
||||
concurrency_policy = "Forbid"
|
||||
successful_jobs_history_limit = 3
|
||||
failed_jobs_history_limit = 5
|
||||
suspend = true
|
||||
job_template {
|
||||
metadata {}
|
||||
spec {
|
||||
backoff_limit = 1
|
||||
ttl_seconds_after_finished = 86400
|
||||
template {
|
||||
metadata {
|
||||
labels = { app = "broker-sync", component = "csv" }
|
||||
}
|
||||
spec {
|
||||
restart_policy = "OnFailure"
|
||||
container {
|
||||
name = "broker-sync"
|
||||
image = local.broker_sync_image
|
||||
command = ["broker-sync", "csv-drop"]
|
||||
|
||||
env {
|
||||
name = "BROKER_SYNC_DATA_DIR"
|
||||
value = "/data"
|
||||
}
|
||||
env {
|
||||
name = "WF_SESSION_PATH"
|
||||
value = "/data/wealthfolio_session.json"
|
||||
}
|
||||
env {
|
||||
name = "WF_BASE_URL"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_base_url"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_USERNAME"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_username"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_PASSWORD"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_password"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
name = "data"
|
||||
mount_path = "/data"
|
||||
}
|
||||
resources {
|
||||
requests = { cpu = "10m", memory = "64Mi" }
|
||||
limits = { memory = "128Mi" }
|
||||
}
|
||||
}
|
||||
volume {
|
||||
name = "data"
|
||||
persistent_volume_claim {
|
||||
claim_name = kubernetes_persistent_volume_claim.data_encrypted.metadata[0].name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Monthly HMRC FX reconciliation — rewrites last-month activities with official
|
||||
# HMRC rates once they publish. Phase 1 tail / Phase 2 deliverable.
|
||||
resource "kubernetes_cron_job_v1" "fx_reconcile" {
|
||||
metadata {
|
||||
name = "broker-sync-fx-reconcile"
|
||||
namespace = kubernetes_namespace.broker_sync.metadata[0].name
|
||||
labels = { app = "broker-sync", component = "fx-reconcile" }
|
||||
}
|
||||
spec {
|
||||
schedule = "5 5 7 * *" # 05:05 UK on the 7th
|
||||
concurrency_policy = "Forbid"
|
||||
successful_jobs_history_limit = 3
|
||||
failed_jobs_history_limit = 5
|
||||
suspend = true
|
||||
job_template {
|
||||
metadata {}
|
||||
spec {
|
||||
backoff_limit = 1
|
||||
ttl_seconds_after_finished = 86400
|
||||
template {
|
||||
metadata {
|
||||
labels = { app = "broker-sync", component = "fx-reconcile" }
|
||||
}
|
||||
spec {
|
||||
restart_policy = "OnFailure"
|
||||
container {
|
||||
name = "broker-sync"
|
||||
image = local.broker_sync_image
|
||||
command = ["broker-sync", "fx-reconcile"]
|
||||
|
||||
env {
|
||||
name = "BROKER_SYNC_DATA_DIR"
|
||||
value = "/data"
|
||||
}
|
||||
env {
|
||||
name = "WF_SESSION_PATH"
|
||||
value = "/data/wealthfolio_session.json"
|
||||
}
|
||||
env {
|
||||
name = "WF_BASE_URL"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_base_url"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_USERNAME"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_username"
|
||||
}
|
||||
}
|
||||
}
|
||||
env {
|
||||
name = "WF_PASSWORD"
|
||||
value_from {
|
||||
secret_key_ref {
|
||||
name = "broker-sync-secrets"
|
||||
key = "wf_password"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
volume_mount {
|
||||
name = "data"
|
||||
mount_path = "/data"
|
||||
}
|
||||
resources {
|
||||
requests = { cpu = "10m", memory = "64Mi" }
|
||||
limits = { memory = "128Mi" }
|
||||
}
|
||||
}
|
||||
volume {
|
||||
name = "data"
|
||||
persistent_volume_claim {
|
||||
claim_name = kubernetes_persistent_volume_claim.data_encrypted.metadata[0].name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Backup: snapshot sync.db / fx.db / csv-archive into NFS daily, keep 30 days.
|
||||
# Convention from infra/.claude/CLAUDE.md: every proxmox-lvm app needs a backup
|
||||
# CronJob writing to /mnt/main/<app>-backup/ on the PVE host (served over NFS).
|
||||
resource "kubernetes_cron_job_v1" "backup" {
  metadata {
    name      = "broker-sync-backup"
    namespace = kubernetes_namespace.broker_sync.metadata[0].name
    labels    = { app = "broker-sync", component = "backup" }
  }

  spec {
    schedule                      = "15 4 * * *" # 04:15 UK — after all syncs
    concurrency_policy            = "Forbid"
    successful_jobs_history_limit = 3
    failed_jobs_history_limit     = 5

    job_template {
      metadata {}

      spec {
        backoff_limit              = 1
        ttl_seconds_after_finished = 86400

        template {
          metadata {
            labels = { app = "broker-sync", component = "backup" }
          }

          spec {
            restart_policy = "OnFailure"

            container {
              name  = "backup"
              image = "alpine:3.20"
              # Snapshot the data volume into a timestamped directory on the
              # NFS backup share. Under `set -eu` any genuine copy failure
              # (read-only share, disk full) now fails the Job; the previous
              # `cp ... 2>/dev/null || true` masked those errors and still
              # reported "Backup complete".
              # NOTE(review): the sqlite files are copied while the app may be
              # writing them — a plain cp can capture a mid-transaction state.
              # Consider `sqlite3 ... ".backup"` for a consistent snapshot — TODO.
              command = ["/bin/sh", "-c", <<-EOT
                set -eu
                TIMESTAMP=$(date +%Y-%m-%dT%H-%M-%S)
                BACKUP_DIR="/backup/$TIMESTAMP"
                mkdir -p "$BACKUP_DIR"
                # Copy each database only when it exists: absence is normal on
                # a fresh install, but a failed copy of an existing file must
                # abort the Job instead of being silently swallowed.
                for f in /data/sync.db /data/fx.db; do
                  if [ -f "$f" ]; then
                    cp -a "$f" "$BACKUP_DIR/"
                  fi
                done
                if [ -d /data/csv-archive ]; then
                  cp -a /data/csv-archive "$BACKUP_DIR/"
                fi
                # Retention: keep last 30 days.
                find /backup -mindepth 1 -maxdepth 1 -type d -mtime +30 -exec rm -rf {} +
                echo "Backup complete: $BACKUP_DIR"
              EOT
              ]

              volume_mount {
                name       = "data"
                mount_path = "/data"
                read_only  = true # backup must never mutate live app data
              }

              volume_mount {
                name       = "backup"
                mount_path = "/backup"
              }

              resources {
                requests = { cpu = "5m", memory = "16Mi" }
                limits   = { memory = "64Mi" }
              }
            }

            volume {
              name = "data"

              persistent_volume_claim {
                claim_name = kubernetes_persistent_volume_claim.data_encrypted.metadata[0].name
              }
            }

            volume {
              name = "backup"

              nfs {
                server = var.nfs_server
                path   = "/srv/nfs/broker-sync-backup"
              }
            }
          }
        }
      }
    }
  }
}
|
||||
13
stacks/broker-sync/terragrunt.hcl
Normal file
13
stacks/broker-sync/terragrunt.hcl
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
# Terragrunt wiring for the broker-sync stack: inherit shared settings from
# the repository-root terragrunt.hcl.
include "root" {
  path = find_in_parent_folders()
}

# Ordering-only dependency: skip_outputs = true means no outputs are consumed;
# this stack merely requires ../platform to be applied first.
dependency "platform" {
  config_path = "../platform"
  skip_outputs = true
}

# Ordering-only dependency on ../vault (same pattern as above).
dependency "vault" {
  config_path = "../vault"
  skip_outputs = true
}
|
||||
1
stacks/claude-memory/secrets
Symbolic link
1
stacks/claude-memory/secrets
Symbolic link
|
|
@ -0,0 +1 @@
|
|||
../../secrets
|
||||
1
stacks/foolery/secrets
Symbolic link
1
stacks/foolery/secrets
Symbolic link
|
|
@ -0,0 +1 @@
|
|||
../../secrets
|
||||
|
|
@ -1,66 +0,0 @@
|
|||
#!/bin/sh
# server-power-cycle: watch mains power via the iDRAC Redfish API, using the
# line input voltage of PSU.Slot.2 as the mains-present signal. If input
# power stays absent for $to_wait minutes, send a graceful shutdown and
# record the "off" state; on a later run with the state file present, wait
# for the iDRAC to become reachable again and power the server back on.
#
# Intended to be run periodically (e.g. from cron); a lock file prevents
# overlapping runs.
#
# NOTE(review): credentials are the Dell factory default (root:calvin) on the
# command line, and TLS verification is disabled (-k). Move the credentials
# to a root-only netrc/config file and trust the iDRAC cert — TODO.

tag=server-power-cycle-script
lock=/tmp/server-power-cycle-lock
state=/root/server-power-cycle/state.off
idrac=https://192.168.1.4
psu_url="$idrac/redfish/v1/Chassis/System.Embedded.1/Power/PowerSupplies/PSU.Slot.2"
reset_url="$idrac/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"

logger -t "$tag" "start $(date '+%F-%R')"

if [ -f "$lock" ]; then
    logger -t "$tag" 'Script already running. exiting'
    exit 0
fi
touch "$lock"
# Remove the lock on ANY exit path, including signals. The original removed
# it only on explicit exits, so a killed run wedged every future run.
trap 'rm -f "$lock"' EXIT INT TERM

# Log the end marker and exit successfully.
finish() {
    logger -t "$tag" "end $(date '+%F-%R')"
    exit 0
}

# Print the PSU line input voltage; prints 0 when the iDRAC is unreachable
# or the field is null, so callers can always compare numerically.
input_voltage() {
    v=$(curl -s -k -u root:calvin -H 'Content-type: application/json' \
        -X GET "$psu_url" | jq -r '.LineInputVoltage // 0' 2>/dev/null)
    echo "${v:-0}"
}

if [ -f "$state" ]; then
    logger -t "$tag" 'Server state set to off'
    while true; do
        sleep 60 # sleep 1 minute
        logger -t "$tag" 'Trying to connect to idrac system...'
        # iDRAC reachable again => the PDU/mains feeding it is back.
        if curl --connect-timeout 5 -s -k -u root:calvin \
                -H 'Content-type: application/json' -X GET "$psu_url" >/dev/null; then
            logger -t "$tag" 'Connected to idrac, assuming power is back on'
            logger -t "$tag" 'Power supply restored, sending power on command'
            curl -s -k -u root:calvin -X POST \
                -d '{"Action": "Reset", "ResetType": "On"}' \
                -H 'Content-type: application/json' "$reset_url"
            rm -f "$state"
            finish
        fi
    done
fi

# Check input voltage on the power supply connected to the outer circuit.
if [ "$(input_voltage)" -gt 0 ]; then
    logger -t "$tag" 'power supply is on. exiting'
    finish
fi

to_wait=30
echo "Continuously checking power supply for the next $to_wait minutes"

# Poll once a minute for $to_wait minutes (the original echoed $to_wait but
# hardcoded `seq 30` in the loop — now the single variable drives both).
i=1
while [ "$i" -le "$to_wait" ]; do
    logger -t "$tag" "Sleeping a minute..Minute $i"
    sleep 60

    if [ "$(input_voltage)" -gt 0 ]; then
        logger -t "$tag" 'power supply is on. exiting'
        finish
    fi
    i=$((i + 1))
done

logger -t "$tag" 'Power supply did not come back, sending graceful shutdown signal'
curl -s -k -u root:calvin -X POST \
    -d '{"Action": "Reset", "ResetType": "GracefulShutdown"}' \
    -H 'Content-type: application/json' "$reset_url"

touch "$state"
logger -t "$tag" "end $(date '+%F-%R')"
||||
1
stacks/terminal/secrets
Symbolic link
1
stacks/terminal/secrets
Symbolic link
|
|
@ -0,0 +1 @@
|
|||
../../secrets
|
||||
Loading…
Add table
Add a link
Reference in a new issue