Compare commits

...

10 commits

Author SHA1 Message Date
Viktor Barzin
29567103d6 Add DRONE_WEBHOOK_SECRET for GitHub webhook authentication
Fixes webhook signature validation failures causing 400 errors.
2026-02-01 20:42:07 +00:00
Viktor Barzin
4a857ebefd Add per-pod GPU memory metrics exporter
- Add DaemonSet that runs on GPU node and exposes Prometheus metrics
- Uses nvidia-smi to collect per-process GPU memory usage
- Maps PIDs to container IDs via /proc/<pid>/cgroup
- Exposes gpu_pod_memory_used_bytes metric at :9401/metrics
- Add Prometheus scrape config for gpu-pod-memory job

[ci skip]
2026-01-31 16:58:14 +00:00
Viktor Barzin
09a5e3a273
Add crowdsec-blocklist-import CronJob
Import public threat intelligence blocklists into CrowdSec daily at 4 AM.
Uses kubectl exec to run the import script inside an existing CrowdSec
agent pod that is already registered with the LAPI.

Source: https://github.com/wolffcatskyy/crowdsec-blocklist-import

[ci skip]
2026-01-28 20:11:44 +00:00
Viktor Barzin
2ac92167c5
fix resume pdf generation [ci skip] 2026-01-28 19:42:13 +00:00
Viktor Barzin
eeabe652d3
upgrade immich to 2.5.0 [ci skip] 2026-01-28 19:41:52 +00:00
Viktor Barzin
078e1eeeef
add the yt-highlights app [ci skip] 2026-01-28 18:03:49 +00:00
Viktor Barzin
f867de6e7d
ad service for youtube video highlights [ci skip] 2026-01-28 17:58:39 +00:00
Viktor Barzin
19a41367ba
add reactive resume service [ci skip] 2026-01-28 17:57:39 +00:00
Viktor Barzin
92e58d3b62
increase the num of nvidia slices to 20 [ci skip] 2026-01-26 20:41:59 +00:00
Viktor Barzin
947c5d3d19 Add AFFiNE visual canvas for storytelling
- Deploy AFFiNE as self-hosted visual canvas tool
- Uses shared PostgreSQL and Redis from cluster
- NFS storage for uploads and configuration
- Email configured via mailserver.viktorbarzin.me
- Ingress at affine.viktorbarzin.me

[ci skip]
2026-01-25 21:40:39 +00:00
17 changed files with 3179 additions and 83 deletions

14
main.tf
View file

@ -37,6 +37,7 @@ variable "dbaas_pgadmin_password" {}
variable "drone_github_client_id" {}
variable "drone_github_client_secret" {}
variable "drone_rpc_secret" {}
variable "drone_webhook_secret" {}
variable "dockerhub_registry_password" {}
variable "oauth2_proxy_client_id" {}
variable "oauth2_proxy_client_secret" {}
@ -79,6 +80,7 @@ variable "vaultwarden_smtp_password" {}
variable "resume_database_url" {}
variable "resume_database_password" {}
variable "resume_redis_url" {}
variable "resume_auth_secret" { type = string }
variable "frigate_valchedrym_camera_credentials" { default = "" }
variable "paperless_db_password" {}
variable "diun_nfty_token" {}
@ -139,6 +141,10 @@ variable "freedify_credentials" { type = map(any) }
variable "mcaptcha_postgresql_password" { type = string }
variable "mcaptcha_cookie_secret" { type = string }
variable "mcaptcha_captcha_salt" { type = string }
variable "openrouter_api_key" { type = string }
variable "slack_bot_token" { type = string }
variable "slack_channel" { type = string }
variable "affine_postgresql_password" { type = string }
provider "kubernetes" {
config_path = var.prod ? "" : "~/.kube/config"
@ -437,6 +443,7 @@ module "kubernetes_cluster" {
drone_github_client_id = var.drone_github_client_id
drone_github_client_secret = var.drone_github_client_secret
drone_rpc_secret = var.drone_rpc_secret
drone_webhook_secret = var.drone_webhook_secret
# Oauth proxy
oauth2_proxy_client_id = var.oauth2_proxy_client_id
@ -492,6 +499,7 @@ module "kubernetes_cluster" {
resume_redis_url = var.resume_redis_url
resume_database_password = var.resume_database_password
resume_database_url = var.resume_database_url
resume_auth_secret = var.resume_auth_secret
frigate_valchedrym_camera_credentials = var.frigate_valchedrym_camera_credentials
@ -570,6 +578,12 @@ module "kubernetes_cluster" {
mcaptcha_postgresql_password = var.mcaptcha_postgresql_password
mcaptcha_cookie_secret = var.mcaptcha_cookie_secret
mcaptcha_captcha_salt = var.mcaptcha_captcha_salt
openrouter_api_key = var.openrouter_api_key
slack_bot_token = var.slack_bot_token
slack_channel = var.slack_channel
affine_postgresql_password = var.affine_postgresql_password
}

View file

@ -0,0 +1,217 @@
# --- AFFiNE module inputs ---
variable "tls_secret_name" {} # TLS secret name; provisioned into the namespace below and reused by the ingress
variable "tier" { type = string } # tier label applied to the deployment (see kubernetes_deployment.affine)
variable "postgresql_password" {} # password for the "affine" user on the shared PostgreSQL (dbaas namespace)
variable "smtp_password" { type = string } # SMTP password for info@viktorbarzin.me, injected as MAILER_PASSWORD

# Dedicated namespace for all AFFiNE resources.
resource "kubernetes_namespace" "affine" {
metadata {
name = "affine"
}
}

# Provision the TLS secret in the affine namespace (via ../setup_tls_secret)
# so the ingress below can terminate HTTPS.
module "tls_secret" {
source = "../setup_tls_secret"
namespace = kubernetes_namespace.affine.metadata[0].name
tls_secret_name = var.tls_secret_name
}
# Environment shared by BOTH the migration init container and the main AFFiNE
# container (injected via dynamic "env" blocks in kubernetes_deployment.affine),
# so migrations run against exactly the same configuration as the server.
locals {
common_env = [
# Shared PostgreSQL in the dbaas namespace; database and user are both "affine".
{
name = "DATABASE_URL"
value = "postgresql://affine:${var.postgresql_password}@postgresql.dbaas.svc.cluster.local:5432/affine"
},
# Shared cluster Redis.
{
name = "REDIS_SERVER_HOST"
value = "redis.redis.svc.cluster.local"
},
{
name = "AFFINE_INDEXER_ENABLED"
value = "false"
},
# Raise the Node.js heap cap to 4 GiB, matching the container memory limit.
{
name = "NODE_OPTIONS"
value = "--max-old-space-size=4096"
},
# Server URL configuration
{
name = "AFFINE_SERVER_EXTERNAL_URL"
value = "https://affine.viktorbarzin.me"
},
{
name = "AFFINE_SERVER_HTTPS"
value = "true"
},
# Email/SMTP configuration
{
name = "MAILER_HOST"
value = "mailserver.viktorbarzin.me"
},
{
name = "MAILER_PORT"
value = "587"
},
{
name = "MAILER_USER"
value = "info@viktorbarzin.me"
},
{
name = "MAILER_PASSWORD"
value = var.smtp_password
},
{
name = "MAILER_SENDER"
value = "AFFiNE <info@viktorbarzin.me>"
},
]
}
# AFFiNE server deployment. Single replica; durable state lives in
# PostgreSQL/Redis and on the NFS volume mounted below.
resource "kubernetes_deployment" "affine" {
metadata {
name = "affine"
namespace = kubernetes_namespace.affine.metadata[0].name
labels = {
app = "affine"
tier = var.tier
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "affine"
}
}
template {
metadata {
labels = {
app = "affine"
}
}
spec {
# Init container to run database migrations.
# Same image, env and mounts as the main container so the predeploy
# script sees the exact runtime configuration.
init_container {
name = "migration"
image = "ghcr.io/toeverything/affine:stable"
command = ["sh", "-c", "node ./scripts/self-host-predeploy.js"]
dynamic "env" {
for_each = local.common_env
content {
name = env.value.name
value = env.value.value
}
}
volume_mount {
name = "data"
mount_path = "/root/.affine/storage"
sub_path = "storage"
}
volume_mount {
name = "data"
mount_path = "/root/.affine/config"
sub_path = "config"
}
}
container {
name = "affine"
image = "ghcr.io/toeverything/affine:stable"
port {
container_port = 3010
}
# Shared env (DB, Redis, URLs, SMTP) — see locals.common_env.
dynamic "env" {
for_each = local.common_env
content {
name = env.value.name
value = env.value.value
}
}
# One NFS volume backs both storage and config via sub_path.
volume_mount {
name = "data"
mount_path = "/root/.affine/storage"
sub_path = "storage"
}
volume_mount {
name = "data"
mount_path = "/root/.affine/config"
sub_path = "config"
}
resources {
requests = {
memory = "512Mi"
cpu = "100m"
}
limits = {
memory = "4Gi" # matches NODE_OPTIONS --max-old-space-size=4096
cpu = "2"
}
}
# Both probes hit /info on the app port; long initial delays to
# accommodate slow startup after migrations.
liveness_probe {
http_get {
path = "/info"
port = 3010
}
initial_delay_seconds = 120
period_seconds = 30
timeout_seconds = 10
}
readiness_probe {
http_get {
path = "/info"
port = 3010
}
initial_delay_seconds = 60
period_seconds = 10
timeout_seconds = 5
}
}
volume {
name = "data"
nfs {
server = "10.0.10.15"
path = "/mnt/main/affine"
}
}
}
}
}
}
# ClusterIP service exposing the AFFiNE pod: port 80 -> container port 3010.
resource "kubernetes_service" "affine" {
metadata {
name = "affine"
namespace = kubernetes_namespace.affine.metadata[0].name
labels = {
app = "affine"
}
}
spec {
selector = {
app = "affine"
}
port {
name = "http"
port = 80
target_port = 3010
}
}
}
# Public ingress at affine.viktorbarzin.me via the shared ingress factory.
# Body size is raised to 500m twice: via the factory's max_body_size argument
# AND via an explicit nginx annotation (belt-and-braces for large uploads).
module "ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.affine.metadata[0].name
name = "affine"
tls_secret_name = var.tls_secret_name
max_body_size = "500m"
extra_annotations = {
"nginx.ingress.kubernetes.io/proxy-body-size" : "500m"
}
}

View file

@ -202,3 +202,144 @@ module "ingress" {
rybbit_site_id = "d09137795ccc"
}
# CronJob to import public blocklists into CrowdSec
# https://github.com/wolffcatskyy/crowdsec-blocklist-import
# Uses kubectl exec to run in an existing CrowdSec agent pod that's already registered
# Daily CronJob that imports public threat-intel blocklists into CrowdSec.
# Rather than registering a new agent/bouncer with the LAPI, it copies the
# upstream import script into an EXISTING, already-registered agent pod
# (found by label) and runs it there via kubectl cp + exec.
resource "kubernetes_cron_job_v1" "crowdsec_blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
labels = {
app = "crowdsec-blocklist-import"
tier = var.tier
}
}
spec {
# Run daily at 4 AM (Europe/London, per the timezone field below).
schedule = "0 4 * * *"
timezone = "Europe/London"
concurrency_policy = "Forbid"
successful_jobs_history_limit = 3
failed_jobs_history_limit = 3
job_template {
metadata {
labels = {
app = "crowdsec-blocklist-import"
}
}
spec {
backoff_limit = 3
template {
metadata {
labels = {
app = "crowdsec-blocklist-import"
}
}
spec {
# Service account with pods get/list + pods/exec create (defined below).
service_account_name = kubernetes_service_account.blocklist_import.metadata[0].name
restart_policy = "OnFailure"
# NOTE(review): the script below relies on curl being present in
# bitnami/kubectl:latest — verify the image still ships it.
container {
name = "blocklist-import"
image = "bitnami/kubectl:latest"
command = ["/bin/bash", "-c"]
args = [
<<-EOF
set -e
echo "Finding CrowdSec agent pod..."
AGENT_POD=$(kubectl get pods -n crowdsec -l k8s-app=crowdsec,type=agent -o jsonpath='{.items[0].metadata.name}')
if [ -z "$AGENT_POD" ]; then
echo "ERROR: Could not find CrowdSec agent pod"
exit 1
fi
echo "Using agent pod: $AGENT_POD"
# Download the import script
echo "Downloading blocklist import script..."
curl -fsSL -o /tmp/import.sh \
https://raw.githubusercontent.com/wolffcatskyy/crowdsec-blocklist-import/main/import.sh
chmod +x /tmp/import.sh
# Copy script to agent pod and execute
echo "Copying script to agent pod and executing..."
kubectl cp /tmp/import.sh crowdsec/$AGENT_POD:/tmp/import.sh
kubectl exec -n crowdsec "$AGENT_POD" -- /bin/bash -c '
set -e
# Run with native mode since we are inside the CrowdSec container
export MODE=native
export DECISION_DURATION=24h
export FETCH_TIMEOUT=60
export LOG_LEVEL=INFO
/tmp/import.sh
# Cleanup
rm -f /tmp/import.sh
'
echo "Blocklist import completed successfully!"
EOF
]
}
}
}
}
}
}
}
# Service account for the blocklist import job (needs kubectl exec permissions)
# Minimal RBAC for the blocklist-import CronJob: it only needs to locate the
# CrowdSec agent pod (get/list pods) and run the script inside it (pods/exec).
resource "kubernetes_service_account" "blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
}
# Namespace-scoped Role: read pods + create exec sessions. Nothing cluster-wide.
resource "kubernetes_role" "blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
rule {
api_groups = [""]
resources = ["pods"]
verbs = ["get", "list"]
}
rule {
api_groups = [""]
resources = ["pods/exec"]
verbs = ["create"]
}
}
# Bind the Role to the CronJob's service account.
resource "kubernetes_role_binding" "blocklist_import" {
metadata {
name = "crowdsec-blocklist-import"
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = kubernetes_role.blocklist_import.metadata[0].name
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.blocklist_import.metadata[0].name
namespace = kubernetes_namespace.crowdsec.metadata[0].name
}
}

View file

@ -3,6 +3,7 @@ variable "tier" { type = string }
variable "github_client_id" {}
variable "github_client_secret" {}
variable "rpc_secret" {}
variable "webhook_secret" {}
variable "server_host" {}
variable "server_proto" {}
variable "rpc_host" {
@ -98,6 +99,10 @@ resource "kubernetes_deployment" "drone_server" {
name = "DRONE_RPC_SECRET"
value = var.rpc_secret
}
env {
name = "DRONE_WEBHOOK_SECRET"
value = var.webhook_secret
}
env {
name = "DRONE_SERVER_HOST"
value = var.server_host

View file

@ -5,7 +5,7 @@ variable "homepage_token" {}
variable "immich_version" {
type = string
# Change me to upgrade
default = "v2.4.1"
default = "v2.5.0"
}

View file

@ -22,6 +22,7 @@ variable "dbaas_pgadmin_password" {}
variable "drone_github_client_id" {}
variable "drone_github_client_secret" {}
variable "drone_rpc_secret" {}
variable "drone_webhook_secret" {}
variable "oauth2_proxy_client_id" {}
variable "oauth2_proxy_client_secret" {}
variable "oauth2_proxy_authenticated_emails" {}
@ -63,6 +64,7 @@ variable "vaultwarden_smtp_password" {}
variable "resume_database_url" {}
variable "resume_database_password" {}
variable "resume_redis_url" {}
variable "resume_auth_secret" { type = string }
variable "frigate_valchedrym_camera_credentials" { default = "" }
variable "paperless_db_password" {}
variable "diun_nfty_token" {}
@ -118,6 +120,10 @@ variable "freedify_credentials" { type = map(any) }
variable "mcaptcha_postgresql_password" { type = string }
variable "mcaptcha_cookie_secret" { type = string }
variable "mcaptcha_captcha_salt" { type = string }
variable "openrouter_api_key" { type = string }
variable "slack_bot_token" { type = string }
variable "slack_channel" { type = string }
variable "affine_postgresql_password" { type = string }
variable "defcon_level" {
@ -143,7 +149,7 @@ locals {
"url", "excalidraw", "travel_blog", "dashy", "send", "ytdlp", "wealthfolio", "rybbit", "stirling-pdf",
"networking-toolbox", "navidrome", "freshrss", "forgejo", "tor-proxy", "real-estate-crawler", "n8n",
"changedetection", "linkwarden", "matrix", "homepage", "meshcentral", "diun", "cyberchef", "ntfy", "ollama",
"servarr", "jsoncrack", "paperless-ngx", "frigate", "audiobookshelf", "tandoor", "ebook2audiobook", "netbox", "speedtest", "resume", "freedify", "mcaptcha"
"servarr", "jsoncrack", "paperless-ngx", "frigate", "audiobookshelf", "tandoor", "ebook2audiobook", "netbox", "speedtest", "resume", "freedify", "mcaptcha", "affine"
],
}
active_modules = distinct(flatten([
@ -215,6 +221,7 @@ module "drone" {
github_client_id = var.drone_github_client_id
github_client_secret = var.drone_github_client_secret
rpc_secret = var.drone_rpc_secret
webhook_secret = var.drone_webhook_secret
server_host = "drone.viktorbarzin.me"
server_proto = "https"
tier = local.tiers.edge
@ -539,10 +546,13 @@ module "redis" {
}
module "ytdlp" {
source = "./youtube_dl"
for_each = contains(local.active_modules, "ytdlp") ? { ytdlp = true } : {}
tls_secret_name = var.tls_secret_name
tier = local.tiers.aux
source = "./youtube_dl"
for_each = contains(local.active_modules, "ytdlp") ? { ytdlp = true } : {}
tls_secret_name = var.tls_secret_name
tier = local.tiers.aux
openrouter_api_key = var.openrouter_api_key
slack_bot_token = var.slack_bot_token
slack_channel = var.slack_channel
depends_on = [null_resource.core_services]
}
@ -583,16 +593,15 @@ module "crowdsec" {
crowdsec_dash_machine_password = var.crowdsec_dash_machine_password
}
# Seems like it needs S3 even if pg is local...
# module "resume" {
# source = "./resume"
# tier = local.tiers.aux
# for_each = contains(local.active_modules, "resume") ? { resume = true } : {}
# tls_secret_name = var.tls_secret_name
# redis_url = var.resume_redis_url
# database_url = var.resume_database_url
# db_password = var.resume_database_password
# }
# Reactive Resume (./resume), gated by the "resume" entry in active_modules.
# SMTP credentials are reused from the shared mailserver accounts map.
module "resume" {
source = "./resume"
for_each = contains(local.active_modules, "resume") ? { resume = true } : {}
tls_secret_name = var.tls_secret_name
tier = local.tiers.aux
database_url = var.resume_database_url
auth_secret = var.resume_auth_secret
smtp_password = var.mailserver_accounts["info@viktorbarzin.me"]
}
module "uptime-kuma" {
source = "./uptime-kuma"
@ -1062,3 +1071,14 @@ module "freedify" {
for_each = contains(local.active_modules, "freedify") ? { freedify = true } : {}
additional_credentials = var.freedify_credentials
}
# AFFiNE visual canvas (./affine), gated by the "affine" entry in active_modules.
# Uses the shared PostgreSQL password and mailserver SMTP credentials.
module "affine" {
source = "./affine"
for_each = contains(local.active_modules, "affine") ? { affine = true } : {}
tls_secret_name = var.tls_secret_name
postgresql_password = var.affine_postgresql_password
smtp_password = var.mailserver_accounts["info@viktorbarzin.me"]
tier = local.tiers.aux
depends_on = [null_resource.core_services]
}

View file

@ -623,4 +623,9 @@ extraScrapeConfigs: |
action: replace
regex: '(.*)'
replacement: 'nvidia_tesla_t4_$${1}'
- job_name: 'gpu-pod-memory'
static_configs:
- targets:
- "gpu-pod-exporter.nvidia.svc.cluster.local"
metrics_path: '/metrics'

View file

@ -17,6 +17,18 @@ resource "kubernetes_namespace" "nvidia" {
}
}
# Apply GPU taint to ensure only GPU workloads run on GPU node
# Imperatively taints k8s-node1 so only pods with a matching toleration
# schedule there. NOTE(review): node name is hard-coded and this relies on
# kubectl being configured on the machine running terraform apply.
resource "null_resource" "gpu_node_taint" {
provisioner "local-exec" {
command = "kubectl taint nodes k8s-node1 nvidia.com/gpu=true:NoSchedule --overwrite"
}
# Re-run if namespace changes (proxy for cluster changes)
triggers = {
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
}
# [not needed anymore; part of the chart values] Apply to operator with:
# kubectl patch clusterpolicies.nvidia.com/cluster-policy -n gpu-operator --type merge -p '{"spec": {"devicePlugin": {"config": {"name": "time-slicing-config", "default": "any"}}}}'
@ -36,7 +48,7 @@ resource "kubernetes_config_map" "time_slicing_config" {
failRequestsGreaterThanOne: false
resources:
- name: nvidia.com/gpu
replicas: 10
replicas: 20
EOF
}
depends_on = [kubernetes_namespace.nvidia]
@ -82,6 +94,12 @@ resource "kubernetes_deployment" "nvidia-exporter" {
node_selector = {
"gpu" : "true"
}
toleration {
key = "nvidia.com/gpu"
operator = "Equal"
value = "true"
effect = "NoSchedule"
}
container {
image = "nvidia/dcgm-exporter:latest"
name = "nvidia-exporter"
@ -219,3 +237,274 @@ module "ingress" {
# }
# depends_on = [helm_release.nvidia-gpu-operator]
# }
# GPU Pod Memory Exporter - exposes per-pod GPU memory usage as Prometheus metrics
# ConfigMap holding the Python exporter script mounted into the
# gpu-pod-exporter DaemonSet below. The script shells out to nvidia-smi for
# per-process GPU memory, maps each PID to a short container ID by reading
# /host_proc/<pid>/cgroup, and serves Prometheus-format metrics
# (gpu_pod_memory_used_bytes) plus a /health endpoint on :9401.
# The script body is a heredoc string literal and is left untouched here.
resource "kubernetes_config_map" "gpu_pod_exporter_script" {
metadata {
name = "gpu-pod-exporter-script"
namespace = kubernetes_namespace.nvidia.metadata[0].name
}
data = {
"exporter.py" = <<-EOF
#!/usr/bin/env python3
"""GPU Pod Memory Exporter - Collects per-pod GPU memory usage."""
import subprocess
import time
import re
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
METRICS_PORT = 9401
SCRAPE_INTERVAL = 15
def get_gpu_processes():
"""Run nvidia-smi to get GPU process info."""
try:
result = subprocess.run(
["nvidia-smi", "--query-compute-apps=pid,used_memory,process_name", "--format=csv,noheader,nounits"],
capture_output=True, text=True, timeout=10
)
if result.returncode != 0:
print(f"nvidia-smi error: {result.stderr}")
return []
processes = []
for line in result.stdout.strip().split('\n'):
if not line.strip():
continue
parts = [p.strip() for p in line.split(',')]
if len(parts) >= 3:
pid, memory_mib, process_name = parts[0], parts[1], parts[2]
processes.append({
'pid': pid,
'memory_bytes': int(memory_mib) * 1024 * 1024,
'process_name': process_name
})
return processes
except Exception as e:
print(f"Error running nvidia-smi: {e}")
return []
def get_container_id(pid):
"""Map PID to container ID via cgroup."""
cgroup_path = f"/host_proc/{pid}/cgroup"
try:
with open(cgroup_path, 'r') as f:
for line in f:
# Match container ID patterns (docker, containerd, cri-o)
# e.g., /kubepods/pod.../containerid or /docker/containerid
match = re.search(r'[:/]([a-f0-9]{64})', line)
if match:
return match.group(1)[:12] # Return short container ID
# Also check for cri-containerd pattern
match = re.search(r'cri-containerd-([a-f0-9]{64})', line)
if match:
return match.group(1)[:12]
except (FileNotFoundError, PermissionError):
pass
return "host"
# Global metrics storage
current_metrics = []
def collect_metrics():
"""Collect GPU memory metrics."""
global current_metrics
metrics = []
processes = get_gpu_processes()
for proc in processes:
container_id = get_container_id(proc['pid'])
metrics.append({
'container_id': container_id,
'pid': proc['pid'],
'process_name': proc['process_name'],
'memory_bytes': proc['memory_bytes']
})
current_metrics = metrics
def format_metrics():
"""Format metrics in Prometheus exposition format."""
lines = [
"# HELP gpu_pod_memory_used_bytes GPU memory used by container",
"# TYPE gpu_pod_memory_used_bytes gauge"
]
for m in current_metrics:
labels = f'container_id="{m["container_id"]}",pid="{m["pid"]}",process_name="{m["process_name"]}"'
lines.append(f"gpu_pod_memory_used_bytes{{{labels}}} {m['memory_bytes']}")
return '\n'.join(lines) + '\n'
class MetricsHandler(BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/metrics':
content = format_metrics()
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write(content.encode())
elif self.path == '/health':
self.send_response(200)
self.end_headers()
self.wfile.write(b'ok')
else:
self.send_response(404)
self.end_headers()
def log_message(self, format, *args):
pass # Suppress request logging
def background_collector():
"""Background thread to collect metrics periodically."""
import threading
def run():
while True:
collect_metrics()
time.sleep(SCRAPE_INTERVAL)
thread = threading.Thread(target=run, daemon=True)
thread.start()
if __name__ == '__main__':
print(f"Starting GPU Pod Memory Exporter on port {METRICS_PORT}")
collect_metrics() # Initial collection
background_collector()
server = HTTPServer(('', METRICS_PORT), MetricsHandler)
server.serve_forever()
EOF
}
}
# DaemonSet running the exporter script on the GPU node (node_selector gpu=true,
# with a toleration matching the nvidia.com/gpu taint applied above).
resource "kubernetes_daemonset" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
labels = {
app = "gpu-pod-exporter"
tier = var.tier
}
}
spec {
selector {
match_labels = {
app = "gpu-pod-exporter"
}
}
template {
metadata {
labels = {
app = "gpu-pod-exporter"
}
}
spec {
# Host PID namespace so PIDs reported by nvidia-smi can be resolved
# against the host /proc mounted at /host_proc below.
host_pid = true
node_selector = {
"gpu" : "true"
}
toleration {
key = "nvidia.com/gpu"
operator = "Equal"
value = "true"
effect = "NoSchedule"
}
# NOTE(review): python:3.11-slim does not ship nvidia-smi; presumably
# the NVIDIA runtime injects it when nvidia.com/gpu is requested in
# limits — confirm on the node.
container {
name = "exporter"
image = "python:3.11-slim"
command = ["/bin/bash", "-c"]
args = [
"python3 /scripts/exporter.py"
]
port {
container_port = 9401
name = "metrics"
}
volume_mount {
name = "scripts"
mount_path = "/scripts"
read_only = true
}
volume_mount {
name = "host-proc"
mount_path = "/host_proc"
read_only = true
}
resources {
requests = {
cpu = "50m"
memory = "128Mi"
}
limits = {
cpu = "200m"
memory = "256Mi"
# NOTE(review): this consumes one of the time-sliced GPU replicas
# just to observe the GPU — verify this is intentional.
"nvidia.com/gpu" = "1"
}
}
liveness_probe {
http_get {
path = "/health"
port = 9401
}
initial_delay_seconds = 30
period_seconds = 30
}
}
volume {
name = "scripts"
config_map {
name = kubernetes_config_map.gpu_pod_exporter_script.metadata[0].name
default_mode = "0755"
}
}
# Host /proc, read-only, for PID -> cgroup -> container ID mapping.
volume {
name = "host-proc"
host_path {
path = "/proc"
type = "Directory"
}
}
}
}
}
depends_on = [helm_release.nvidia-gpu-operator]
}
# Service fronting the exporter DaemonSet; Prometheus scrapes
# gpu-pod-exporter.nvidia.svc.cluster.local (port 80 -> 9401).
resource "kubernetes_service" "gpu_pod_exporter" {
metadata {
name = "gpu-pod-exporter"
namespace = kubernetes_namespace.nvidia.metadata[0].name
labels = {
app = "gpu-pod-exporter"
}
}
spec {
selector = {
app = "gpu-pod-exporter"
}
port {
name = "metrics"
port = 80
target_port = 9401
}
}
}

View file

@ -1,8 +1,19 @@
variable "tls_secret_name" { type = string }
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "database_url" { type = string }
variable "redis_url" { type = string }
variable "db_password" { type = string }
variable "auth_secret" { type = string }
variable "smtp_password" { type = string }
locals {
namespace = "resume"
app_url = "https://resume.viktorbarzin.me"
}
resource "kubernetes_namespace" "resume" {
metadata {
name = local.namespace
}
}
module "tls_secret" {
source = "../setup_tls_secret"
@ -10,17 +21,103 @@ module "tls_secret" {
tls_secret_name = var.tls_secret_name
}
resource "kubernetes_namespace" "resume" {
# Printer service (browserless chromium for PDF generation)
resource "kubernetes_deployment" "printer" {
metadata {
name = "resume"
name = "printer"
namespace = kubernetes_namespace.resume.metadata[0].name
labels = {
app = "printer"
tier = var.tier
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "printer"
}
}
template {
metadata {
labels = {
app = "printer"
}
}
spec {
container {
name = "printer"
image = "ghcr.io/browserless/chromium:latest"
port {
container_port = 3000
}
env {
name = "HEALTH"
value = "true"
}
env {
name = "CONCURRENT"
value = "20"
}
env {
name = "QUEUED"
value = "10"
}
resources {
requests = {
memory = "256Mi"
cpu = "100m"
}
limits = {
memory = "2Gi"
cpu = "2"
}
}
liveness_probe {
http_get {
path = "/pressure"
port = 3000
}
initial_delay_seconds = 30
period_seconds = 10
timeout_seconds = 5
}
readiness_probe {
http_get {
path = "/pressure"
port = 3000
}
initial_delay_seconds = 10
period_seconds = 10
timeout_seconds = 5
}
}
}
}
}
}
resource "random_string" "random" {
length = 32
lower = true
resource "kubernetes_service" "printer" {
metadata {
name = "printer"
namespace = kubernetes_namespace.resume.metadata[0].name
}
spec {
selector = {
app = "printer"
}
port {
port = 3000
target_port = 3000
}
}
}
# Reactive Resume app
resource "kubernetes_deployment" "resume" {
metadata {
name = "resume"
@ -29,9 +126,6 @@ resource "kubernetes_deployment" "resume" {
app = "resume"
tier = var.tier
}
annotations = {
"reloader.stakater.com/search" = "true"
}
}
spec {
replicas = 1
@ -48,69 +142,113 @@ resource "kubernetes_deployment" "resume" {
}
spec {
container {
image = "amruthpillai/reactive-resume:server-latest"
name = "resume"
image = "amruthpillai/reactive-resume:v5.0.3"
port {
container_port = 3000
}
# Required env vars
env {
name = "APP_URL"
value = local.app_url
}
env {
name = "DATABASE_URL"
value = var.database_url
}
env {
name = "REDIS_URL"
value = var.redis_url
name = "PRINTER_ENDPOINT"
value = "ws://printer.${local.namespace}.svc.cluster.local:3000"
}
env {
name = "PUBLIC_URL"
value = "https://resume.viktorbarzin.me"
}
env {
name = "PUBLIC_SERVER_URL"
value = "https://resume.viktorbarzin.me"
}
env {
name = "POSTGRES_HOST"
value = "postgresql.dbaas.svc.cluster.local"
}
env {
name = "POSTGRES_DB"
value = "resume"
}
env {
name = "POSTGRES_USER"
value = "resume"
}
env {
name = "POSTGRES_PASSWORD"
value = var.db_password
}
env {
name = "JWT_SECRET"
value = random_string.random.result
name = "PRINTER_APP_URL"
value = "http://resume.${local.namespace}.svc.cluster.local"
}
env {
name = "AUTH_SECRET"
value = random_string.random.result
value = var.auth_secret
}
env {
name = "SECRET_KEY"
value = random_string.random.result
}
env {
name = "JWT_EXPIRY_TIME"
value = 604800
}
env {
name = "STORAGE_ENDPOINT"
value = "https://resume.viktorbarzin.me"
}
// There's a ton of these... I give up...
// check https://github.com/AmruthPillai/Reactive-Resume/blob/main/.env.example
port {
container_port = 3000
# Server config
env {
name = "TZ"
value = "Etc/UTC"
}
port {
container_port = 3100
# SMTP config for password reset emails
env {
name = "SMTP_HOST"
value = "mail.viktorbarzin.me"
}
env {
name = "SMTP_PORT"
value = "587"
}
env {
name = "SMTP_USER"
value = "info@viktorbarzin.me"
}
env {
name = "SMTP_PASS"
value = var.smtp_password
}
env {
name = "SMTP_FROM"
value = "Reactive Resume <info@viktorbarzin.me>"
}
env {
name = "SMTP_SECURE"
value = "false"
}
# Feature flags
env {
name = "FLAG_DISABLE_SIGNUPS"
value = "false" # toggle me
}
volume_mount {
name = "data"
mount_path = "/app/data"
}
resources {
requests = {
memory = "256Mi"
cpu = "100m"
}
limits = {
memory = "1Gi"
cpu = "1"
}
}
liveness_probe {
http_get {
path = "/api/health"
port = 3000
}
initial_delay_seconds = 60
period_seconds = 30
timeout_seconds = 10
}
readiness_probe {
http_get {
path = "/api/health"
port = 3000
}
initial_delay_seconds = 30
period_seconds = 10
timeout_seconds = 5
}
}
volume {
name = "data"
nfs {
server = "10.0.10.15"
path = "/mnt/main/resume"
}
}
}
@ -118,22 +256,16 @@ resource "kubernetes_deployment" "resume" {
}
}
resource "kubernetes_service" "resume" {
metadata {
name = "resume"
namespace = kubernetes_namespace.resume.metadata[0].name
labels = {
"app" = "resume"
}
}
spec {
selector = {
app = "resume"
}
port {
name = "http"
port = 80
target_port = 3000
}

View file

@ -1,5 +1,8 @@
variable "tls_secret_name" {}
variable "tier" { type = string }
variable "openrouter_api_key" { type = string }
variable "slack_bot_token" { type = string }
variable "slack_channel" { type = string }
resource "kubernetes_namespace" "ytdlp" {
metadata {
@ -128,3 +131,198 @@ module "ingress" {
"nginx.ingress.kubernetes.io/proxy-body-size" : "0",
}
}
# ----------------------
# yt-highlights service
# ----------------------
# Secrets for the yt-highlights service, consumed below via secret_key_ref.
# Values flow in from module variables (and therefore live in Terraform state).
resource "kubernetes_secret" "openrouter" {
metadata {
name = "openrouter-credentials"
namespace = kubernetes_namespace.ytdlp.metadata[0].name
}
data = {
"api-key" = var.openrouter_api_key
}
}
resource "kubernetes_secret" "slack" {
metadata {
name = "slack-credentials"
namespace = kubernetes_namespace.ytdlp.metadata[0].name
}
data = {
"bot-token" = var.slack_bot_token
"channel" = var.slack_channel
}
}
# yt-highlights: GPU-backed service that transcribes videos (whisper
# large-v3 on CUDA) and summarises highlights via OpenRouter, with an Ollama
# fallback; notifies Slack. Runs on the GPU node, single replica (Recreate
# strategy since it claims the GPU resource).
resource "kubernetes_deployment" "yt_highlights" {
metadata {
name = "yt-highlights"
namespace = kubernetes_namespace.ytdlp.metadata[0].name
labels = {
app = "yt-highlights"
tier = var.tier
}
annotations = {
"diun.enable" = "true"
}
}
spec {
replicas = 1
strategy {
type = "Recreate"
}
selector {
match_labels = {
app = "yt-highlights"
}
}
template {
metadata {
labels = {
app = "yt-highlights"
}
}
spec {
# NOTE(review): elsewhere in this repo the GPU node is tainted
# nvidia.com/gpu=true:NoSchedule, but this pod declares no matching
# toleration — verify scheduling (e.g. ExtendedResourceToleration).
node_selector = {
"gpu" : "true"
}
container {
name = "yt-highlights"
image = "viktorbarzin/yt-highlights:v20-20260127"
image_pull_policy = "Always"
port {
container_port = 8000
}
# ASR (speech-to-text) settings
env {
name = "ASR_MODEL"
value = "large-v3"
}
env {
name = "ASR_DEVICE"
value = "cuda"
}
# Primary LLM via OpenRouter
env {
name = "OPENROUTER_MODEL"
value = "deepseek/deepseek-r1-0528:free"
}
env {
name = "OPENROUTER_API_KEY"
value_from {
secret_key_ref {
name = kubernetes_secret.openrouter.metadata[0].name
key = "api-key"
}
}
}
env {
name = "DATA_PATH"
value = "/data"
}
# Slack notifications
env {
name = "SLACK_BOT_TOKEN"
value_from {
secret_key_ref {
name = kubernetes_secret.slack.metadata[0].name
key = "bot-token"
}
}
}
env {
name = "SLACK_CHANNEL"
value_from {
secret_key_ref {
name = kubernetes_secret.slack.metadata[0].name
key = "channel"
}
}
}
env {
name = "REDIS_URL"
value = "redis://redis.redis.svc.cluster.local:6379/0"
}
# Store model cache on NFS to avoid ephemeral storage eviction
env {
name = "HF_HOME"
value = "/data/cache/huggingface"
}
env {
name = "TORCH_HOME"
value = "/data/cache/torch"
}
# Ollama fallback for when OpenRouter models fail
env {
name = "OLLAMA_URL"
value = "http://ollama.ollama.svc.cluster.local:11434"
}
env {
name = "OLLAMA_MODEL"
value = "qwen2.5:14b"
}
volume_mount {
name = "data"
mount_path = "/data"
}
resources {
limits = {
"nvidia.com/gpu" = "1"
}
}
# Very lenient probe: model loading is slow, so allow 3 min before
# the first check and tolerate 10 slow/failed probes.
liveness_probe {
http_get {
path = "/health"
port = 8000
}
initial_delay_seconds = 180
period_seconds = 60
timeout_seconds = 60
failure_threshold = 10
}
}
volume {
name = "data"
nfs {
server = "10.0.10.15"
path = "/mnt/main/ytdlp-highlights"
}
}
}
}
}
}
# ClusterIP service for yt-highlights: port 80 -> container port 8000.
resource "kubernetes_service" "yt_highlights" {
metadata {
name = "yt-highlights"
namespace = kubernetes_namespace.ytdlp.metadata[0].name
labels = {
"app" = "yt-highlights"
}
}
spec {
selector = {
app = "yt-highlights"
}
port {
name = "http"
port = 80
target_port = 8000
protocol = "TCP"
}
}
}
# Authenticated ingress (protected = true) at host "yt-highlights".
# Proxy timeouts are raised to 300s for long-running processing requests.
module "highlights_ingress" {
source = "../ingress_factory"
namespace = kubernetes_namespace.ytdlp.metadata[0].name
name = "yt-highlights"
tls_secret_name = var.tls_secret_name
host = "yt-highlights"
protected = true
extra_annotations = {
"nginx.ingress.kubernetes.io/proxy-read-timeout" : "300"
"nginx.ingress.kubernetes.io/proxy-send-timeout" : "300"
}
}

View file

@ -0,0 +1,38 @@
# CUDA 12.6 + cuDNN runtime image (Ubuntu 22.04) for GPU inference.
FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV PATH="/root/.local/bin:$PATH"
# Install system dependencies (single layer; apt lists removed to keep it small).
# curl is required by the HEALTHCHECK below.
RUN apt-get update && apt-get install -y \
python3 \
python3-pip \
python3-venv \
ffmpeg \
git \
curl \
&& rm -rf /var/lib/apt/lists/*
# Create app directory
WORKDIR /app
# Install Python dependencies (copied first so this layer caches across code changes)
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
# Copy application code
COPY app/ ./app/
# Create data directories (expected to be shadowed by the /data volume at runtime)
RUN mkdir -p /data/audio /data/transcripts /data/highlights /data/config /data/state
# Expose port
EXPOSE 8000
# Health check against the app's /health endpoint
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1
# Run the application (FastAPI app served by uvicorn on :8000)
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]

View file

@ -0,0 +1 @@
# YouTube Highlights Extraction Service

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,667 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>YouTube Highlights</title>
<style>
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
background: #0f0f0f;
color: #f1f1f1;
min-height: 100vh;
padding: 20px;
}
.container {
max-width: 1200px;
margin: 0 auto;
}
h1 {
color: #ff0000;
margin-bottom: 30px;
display: flex;
align-items: center;
gap: 10px;
}
h1::before {
content: "▶";
}
h2 {
color: #aaa;
font-size: 1.1rem;
margin-bottom: 15px;
text-transform: uppercase;
letter-spacing: 1px;
}
.section {
background: #1a1a1a;
border-radius: 12px;
padding: 20px;
margin-bottom: 20px;
}
.form-row {
display: flex;
gap: 10px;
margin-bottom: 15px;
}
input[type="text"], input[type="url"] {
flex: 1;
padding: 12px 16px;
border: 1px solid #333;
border-radius: 8px;
background: #0f0f0f;
color: #f1f1f1;
font-size: 14px;
}
input:focus {
outline: none;
border-color: #ff0000;
}
button {
padding: 12px 24px;
border: none;
border-radius: 8px;
background: #ff0000;
color: white;
font-size: 14px;
font-weight: 600;
cursor: pointer;
transition: background 0.2s;
}
button:hover {
background: #cc0000;
}
button:disabled {
background: #666;
cursor: not-allowed;
}
button.secondary {
background: #333;
}
button.secondary:hover {
background: #444;
}
button.danger {
background: #333;
color: #ff6b6b;
padding: 6px 10px;
font-size: 16px;
min-width: 32px;
flex-shrink: 0;
}
button.danger:hover {
background: #552222;
color: #ff4444;
}
.channel-list {
display: flex;
flex-wrap: wrap;
gap: 10px;
}
.channel-tag {
display: flex;
align-items: center;
gap: 8px;
background: #2a2a2a;
padding: 8px 12px;
border-radius: 20px;
font-size: 13px;
}
.channel-tag .remove {
background: none;
border: none;
color: #888;
cursor: pointer;
padding: 0;
font-size: 16px;
}
.channel-tag .remove:hover {
color: #ff6b6b;
}
.jobs-list {
display: flex;
flex-direction: column;
gap: 10px;
}
.job-item {
display: flex;
align-items: center;
gap: 15px;
padding: 15px;
background: #0f0f0f;
border-radius: 8px;
}
.job-status {
width: 12px;
height: 12px;
border-radius: 50%;
flex-shrink: 0;
}
.job-status.queued { background: #888; }
.job-status.downloading { background: #3498db; animation: pulse 1s infinite; }
.job-status.transcribing { background: #9b59b6; animation: pulse 1s infinite; }
.job-status.analyzing { background: #f39c12; animation: pulse 1s infinite; }
.job-status.completed { background: #5cb85c; }
.job-status.failed { background: #d9534f; }
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.job-info {
flex: 1;
min-width: 0;
}
.job-title {
font-weight: 500;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.job-url {
font-size: 12px;
color: #888;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.job-progress {
font-size: 12px;
color: #aaa;
margin-top: 4px;
}
.job-progress.downloading { color: #3498db; }
.job-progress.transcribing { color: #9b59b6; }
.job-progress.analyzing { color: #f39c12; }
.job-error {
font-size: 12px;
color: #d9534f;
margin-top: 4px;
}
.job-time {
font-size: 11px;
color: #666;
white-space: nowrap;
}
.video-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 20px;
}
.video-card {
background: #0f0f0f;
border-radius: 12px;
overflow: hidden;
}
.video-thumbnail {
width: 100%;
aspect-ratio: 16/9;
object-fit: cover;
background: #333;
}
.video-content {
padding: 15px;
}
.video-title {
font-weight: 600;
margin-bottom: 8px;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
overflow: hidden;
}
.video-title a {
color: #f1f1f1;
text-decoration: none;
}
.video-title a:hover {
color: #ff0000;
}
.video-channel {
font-size: 13px;
color: #aaa;
margin-bottom: 10px;
}
.video-summary {
font-size: 13px;
color: #ccc;
line-height: 1.5;
display: -webkit-box;
-webkit-line-clamp: 3;
-webkit-box-orient: vertical;
overflow: hidden;
}
.highlights-list {
margin-top: 10px;
padding-top: 10px;
border-top: 1px solid #333;
}
.highlight-item {
display: flex;
gap: 10px;
margin-bottom: 8px;
font-size: 12px;
}
.highlight-time {
color: #ff0000;
font-family: monospace;
white-space: nowrap;
}
.highlight-time a {
color: #ff0000;
text-decoration: none;
}
.highlight-time a:hover {
text-decoration: underline;
}
.empty-state {
text-align: center;
padding: 40px;
color: #666;
}
.tabs {
display: flex;
gap: 5px;
margin-bottom: 20px;
}
.tab {
padding: 10px 20px;
background: #1a1a1a;
border: none;
border-radius: 8px 8px 0 0;
color: #888;
cursor: pointer;
}
.tab.active {
background: #2a2a2a;
color: #f1f1f1;
}
.loading {
display: inline-block;
width: 16px;
height: 16px;
border: 2px solid #333;
border-top-color: #ff0000;
border-radius: 50%;
animation: spin 1s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
.message {
padding: 12px 16px;
border-radius: 8px;
margin-bottom: 15px;
}
.message.success {
background: #1a3a1a;
color: #5cb85c;
}
.message.error {
background: #3a1a1a;
color: #d9534f;
}
.health-status {
font-size: 12px;
color: #888;
margin-left: auto;
}
.health-status.healthy {
color: #5cb85c;
}
.health-status.unhealthy {
color: #d9534f;
}
</style>
</head>
<body>
<div class="container">
<h1>YouTube Highlights <span class="health-status" id="healthStatus">Checking...</span></h1>
<div id="message"></div>
<!-- Process Video Section -->
<div class="section">
<h2>Process a Video</h2>
<div class="form-row">
<input type="url" id="videoUrl" placeholder="Paste YouTube URL here..." />
<button id="processBtn" onclick="processVideo()">Process</button>
</div>
</div>
<!-- Channel Subscriptions -->
<div class="section">
<h2>Subscribed Channels</h2>
<div class="form-row">
<input type="text" id="channelId" placeholder="Channel ID or @handle..." />
<button class="secondary" onclick="addChannel()">Add Channel</button>
<button id="checkNewBtn" onclick="checkNewVideos()">Check for New Videos</button>
</div>
<div class="channel-list" id="channelList">
<div class="empty-state">No channels subscribed yet</div>
</div>
</div>
<!-- Active Jobs -->
<div class="section">
<h2>Processing Queue</h2>
<div class="jobs-list" id="jobsList">
<div class="empty-state">No active jobs</div>
</div>
</div>
<!-- Processed Videos -->
<div class="section">
<h2>Processed Videos</h2>
<div class="video-grid" id="videoGrid">
<div class="empty-state">No videos processed yet</div>
</div>
</div>
</div>
<script>
const API_BASE = '';
// Query the backend /health endpoint and reflect the result in the
// header badge (model/device on success, Unhealthy/Offline otherwise).
async function checkHealth() {
    const badge = document.getElementById('healthStatus');
    try {
        const response = await fetch(`${API_BASE}/health`);
        const body = await response.json();
        const healthy = body.status === 'healthy';
        badge.textContent = healthy ? `Healthy (${body.model} on ${body.device})` : 'Unhealthy';
        badge.className = healthy ? 'health-status healthy' : 'health-status unhealthy';
    } catch (err) {
        badge.textContent = 'Offline';
        badge.className = 'health-status unhealthy';
    }
}
// Flash a transient banner in the #message area; clears after 5s.
// type selects the CSS class: 'success' (default) or 'error'.
function showMessage(text, type = 'success') {
    const target = document.getElementById('message');
    target.innerHTML = `<div class="message ${type}">${text}</div>`;
    setTimeout(() => { target.innerHTML = ''; }, 5000);
}
// Submit the URL from the #videoUrl input to POST /process.
// Disables the button with a spinner while in flight; on success clears
// the input and refreshes the job list, otherwise surfaces the error.
async function processVideo() {
    const input = document.getElementById('videoUrl');
    const url = input.value.trim();
    if (!url) return;
    const btn = document.getElementById('processBtn');
    btn.disabled = true;
    btn.innerHTML = '<span class="loading"></span>';
    try {
        const res = await fetch(`${API_BASE}/process`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ video_url: url })
        });
        const payload = await res.json();
        if (!res.ok) {
            showMessage(payload.detail || 'Failed to process', 'error');
        } else {
            showMessage(`Job queued: ${payload.job_id}`);
            input.value = '';
            loadJobs();
        }
    } catch (err) {
        showMessage('Failed to connect', 'error');
    } finally {
        btn.disabled = false;
        btn.textContent = 'Process';
    }
}
// Fetch GET /channels and render each subscription as a removable tag;
// failures are only logged (the panel keeps its previous content).
async function loadChannels() {
    try {
        const res = await fetch(`${API_BASE}/channels`);
        const payload = await res.json();
        const list = document.getElementById('channelList');
        const channels = payload.channels || [];
        if (channels.length === 0) {
            list.innerHTML = '<div class="empty-state">No channels subscribed yet</div>';
            return;
        }
        const tags = channels.map(ch => `
                        <div class="channel-tag">
                            <span>${escapeHtml(ch.name || ch.id)}</span>
                            <button class="remove" onclick="removeChannel('${ch.id}')">&times;</button>
                        </div>
                    `);
        list.innerHTML = tags.join('');
    } catch (err) {
        console.error('Failed to load channels', err);
    }
}
// Subscribe to a channel (id or @handle) via POST /channels, then
// refresh the tag list; shows an error banner on failure.
async function addChannel() {
    const field = document.getElementById('channelId');
    const channel = field.value.trim();
    if (!channel) return;
    try {
        const res = await fetch(`${API_BASE}/channels`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ channel_id: channel })
        });
        if (!res.ok) {
            const payload = await res.json();
            showMessage(payload.detail || 'Failed to add channel', 'error');
            return;
        }
        field.value = '';
        loadChannels();
        showMessage('Channel added');
    } catch (err) {
        showMessage('Failed to connect', 'error');
    }
}
// Unsubscribe a channel via DELETE /channels/<id> and refresh the list;
// network failures are logged, not surfaced to the user.
async function removeChannel(id) {
    const endpoint = `${API_BASE}/channels/${encodeURIComponent(id)}`;
    try {
        await fetch(endpoint, { method: 'DELETE' });
    } catch (err) {
        console.error('Failed to remove channel', err);
        return;
    }
    loadChannels();
}
// Ask the backend (POST /auto-process) to scan subscribed channels and
// queue any new uploads; reports how many were queued and refreshes jobs.
async function checkNewVideos() {
    const btn = document.getElementById('checkNewBtn');
    btn.disabled = true;
    btn.innerHTML = '<span class="loading"></span> Checking...';
    try {
        const res = await fetch(`${API_BASE}/auto-process`, { method: 'POST' });
        const payload = await res.json();
        if (!res.ok) {
            showMessage(payload.detail || 'Failed to check for new videos', 'error');
        } else {
            const queued = (payload.queued || []).length;
            if (queued > 0) {
                showMessage(`Found and queued ${queued} new video(s) for processing`);
            } else {
                showMessage(`Checked ${payload.channels_checked} channel(s) - no new videos found`);
            }
            loadJobs();
        }
    } catch (err) {
        showMessage('Failed to connect', 'error');
    } finally {
        btn.disabled = false;
        btn.textContent = 'Check for New Videos';
    }
}
// Delete a job via DELETE /jobs/<id> and refresh the queue panel;
// failures are only logged.
async function deleteJob(jobId) {
    const endpoint = `${API_BASE}/jobs/${encodeURIComponent(jobId)}`;
    try {
        await fetch(endpoint, { method: 'DELETE' });
    } catch (err) {
        console.error('Failed to delete job', err);
        return;
    }
    loadJobs();
}
// Fetch GET /jobs and render the "Processing Queue" panel.
// Shows jobs that are still active OR were created within the last 24h,
// newest first; each row gets a status dot, badge, optional progress /
// error text, a relative age label and a delete button.
async function loadJobs() {
    try {
        const res = await fetch(`${API_BASE}/jobs`);
        const data = await res.json();
        const el = document.getElementById('jobsList');
        const jobs = data.jobs || [];
        // Sort by created_at descending (newest first)
        jobs.sort((a, b) => new Date(b.created_at) - new Date(a.created_at));
        // Filter to show only recent jobs (last 24 hours) or active ones
        const recentJobs = jobs.filter(job => {
            const isActive = ['queued', 'downloading', 'transcribing', 'analyzing'].includes(job.status);
            const age = Date.now() - new Date(job.created_at).getTime();
            const isRecent = age < 24 * 60 * 60 * 1000; // 24 hours
            return isActive || isRecent;
        });
        if (recentJobs.length > 0) {
            el.innerHTML = recentJobs.map(job => {
                // Fall back to the raw video id (or a placeholder) until the
                // backend has resolved the title.
                const title = job.video_title || extractVideoId(job.video_url) || 'Processing...';
                const timeAgo = formatTimeAgo(job.created_at);
                let statusBadge = '';
                if (job.status === 'completed') {
                    statusBadge = '<span style="color:#5cb85c">✓ Completed</span>';
                } else if (job.status === 'failed') {
                    statusBadge = '<span style="color:#d9534f">✗ Failed</span>';
                } else {
                    statusBadge = `<span style="color:#f39c12">${capitalize(job.status)}</span>`;
                }
                // NOTE(review): job.job_id is interpolated unescaped into HTML
                // and into the onclick handler — assumed to be a server-generated
                // safe id; confirm it can never contain quotes or angle brackets.
                return `
                <div class="job-item">
                    <div class="job-status ${job.status}"></div>
                    <div class="job-info">
                        <div class="job-title">${escapeHtml(title)}</div>
                        <div class="job-url">
                            Job: ${job.job_id} | ${statusBadge}
                        </div>
                        ${job.progress ? `<div class="job-progress ${job.status}">${escapeHtml(job.progress)}</div>` : ''}
                        ${job.error ? `<div class="job-error">Error: ${escapeHtml(job.error)}</div>` : ''}
                    </div>
                    <div class="job-time">${timeAgo}</div>
                    <button class="danger" onclick="deleteJob('${job.job_id}')" title="Remove from queue">&times;</button>
                </div>
                `;
            }).join('');
        } else {
            el.innerHTML = '<div class="empty-state">No active jobs</div>';
        }
    } catch (e) {
        console.error('Failed to load jobs', e);
    }
}
// Render a timestamp as a coarse relative-time label:
// 'Just now' under a minute, then Nm / Nh / Nd ago.
function formatTimeAgo(dateStr) {
    const elapsed = Math.floor((Date.now() - new Date(dateStr).getTime()) / 1000);
    if (elapsed < 60) return 'Just now';
    if (elapsed < 3600) return `${Math.floor(elapsed / 60)}m ago`;
    if (elapsed < 86400) return `${Math.floor(elapsed / 3600)}h ago`;
    return `${Math.floor(elapsed / 86400)}d ago`;
}
// Upper-case the first character of a string; the rest is unchanged.
function capitalize(str) {
    const head = str.slice(0, 1);
    return head.toUpperCase() + str.slice(1);
}
// Escape characters with special meaning in HTML so untrusted text
// (video titles, channel names, API error strings) can be interpolated
// into innerHTML templates.
// Fix: also escape double and single quotes — the original only handled
// & < >, which is unsafe the moment a caller places the result inside an
// HTML attribute value. Output for quote-free input is unchanged.
// Returns '' for null/undefined/empty input.
function escapeHtml(str) {
    if (!str) return '';
    return str
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&#39;');
}
// Fetch GET /processed and render the results grid: thumbnail, linked
// title, formatted duration, summary, and up to five timestamped
// highlights with deep links into the video.
async function loadProcessed() {
    try {
        const res = await fetch(`${API_BASE}/processed`);
        const data = await res.json();
        const el = document.getElementById('videoGrid');
        if (data.videos && data.videos.length > 0) {
            el.innerHTML = data.videos.map(v => {
                const highlights = v.highlights || [];
                const summary = v.summary || 'No summary available';
                const videoId = extractVideoId(v.video_url);
                // NOTE(review): v.video_url is interpolated unescaped into href
                // attributes — assumed to be a trusted URL from our own backend;
                // confirm it cannot carry quotes or a javascript: scheme.
                // NOTE(review): the `&t=` deep link assumes video_url already has
                // a query string (watch?v=...); a youtu.be share link would need
                // `?t=` instead — confirm the backend always stores watch URLs.
                return `
                <div class="video-card">
                    <img class="video-thumbnail" src="https://img.youtube.com/vi/${videoId}/mqdefault.jpg" alt="" />
                    <div class="video-content">
                        <div class="video-title">
                            <a href="${v.video_url}" target="_blank">${escapeHtml(v.video_title || 'Untitled')}</a>
                        </div>
                        <div class="video-channel">${formatDuration(v.duration_seconds)}</div>
                        <div class="video-summary">${escapeHtml(summary)}</div>
                        ${highlights.length > 0 ? `
                        <div class="highlights-list">
                            ${highlights.slice(0, 5).map(h => `
                            <div class="highlight-item">
                                <span class="highlight-time">
                                    <a href="${v.video_url}&t=${h.timestamp_seconds || 0}" target="_blank">
                                        ${h.timestamp || formatTime(h.timestamp_seconds)}
                                    </a>
                                </span>
                                <span>${escapeHtml(h.title || h.description || '')}</span>
                            </div>
                            `).join('')}
                        </div>
                        ` : ''}
                    </div>
                </div>
                `;
            }).join('');
        } else {
            el.innerHTML = '<div class="empty-state">No videos processed yet</div>';
        }
    } catch (e) {
        console.error('Failed to load processed videos', e);
    }
}
// Format a duration in seconds as H:MM:SS, or M:SS under an hour.
// Falsy input (0, null, undefined) yields the empty string.
function formatDuration(seconds) {
    if (!seconds) return '';
    const total = Math.floor(seconds);
    const pad = n => n.toString().padStart(2, '0');
    const hours = Math.floor(total / 3600);
    const mins = Math.floor((total % 3600) / 60);
    const secs = total % 60;
    return hours > 0 ? `${hours}:${pad(mins)}:${pad(secs)}` : `${mins}:${pad(secs)}`;
}
// Pull the 11-character YouTube video id out of a watch/share URL.
// Returns '' when the input is missing or no id can be found.
// Fixes two defects in the original:
//  - it crashed with a TypeError on null/undefined input (loadJobs calls
//    extractVideoId(job.video_url), and video_url may be absent);
//  - its terminator `(?:&|$)` missed ids followed by '?' or '#'
//    (e.g. youtu.be/<id>?t=30). `(?:[&?#]|$)` strictly widens the match,
//    so every URL the old regex handled still resolves identically.
function extractVideoId(url) {
    if (!url) return '';
    const match = url.match(/(?:v=|\/)([\w-]{11})(?:[&?#]|$)/);
    return match ? match[1] : '';
}
// Format seconds as M:SS; falsy input (0, null, undefined) renders '0:00'.
function formatTime(seconds) {
    if (!seconds) return '0:00';
    const mins = Math.floor(seconds / 60);
    const secs = `${Math.floor(seconds % 60)}`.padStart(2, '0');
    return `${mins}:${secs}`;
}
// Populate every panel once on page load.
checkHealth();
loadChannels();
loadJobs();
loadProcessed();
// Poll the job queue every 5s and the processed grid every 30s.
setInterval(loadJobs, 5000);
setInterval(loadProcessed, 30000);
</script>
</body>
</html>

View file

@ -0,0 +1,10 @@
# Web framework + ASGI server
fastapi>=0.104.0
uvicorn>=0.24.0
# YouTube downloader — pinned exactly: yt-dlp behavior changes across releases
yt-dlp==2025.12.8
# Whisper transcription (CTranslate2 backend; GPU via the CUDA base image)
faster-whisper>=1.0.0
# HTTP clients (async and sync)
httpx>=0.25.0
requests>=2.31.0
# Request/response models for FastAPI
pydantic>=2.0.0
# Form/multipart parsing support for FastAPI endpoints
python-multipart>=0.0.6
# RSS/Atom feed parsing (presumably for channel upload feeds — confirm)
feedparser>=6.0.0
# Redis client
redis>=5.0.0

Binary file not shown.

Binary file not shown.