add ollama [ci skip]

Viktor Barzin 2025-12-14 09:49:25 +00:00
parent b4f45c7e73
commit d9dfb084c3
2 changed files with 107 additions and 10 deletions


@@ -46,16 +46,113 @@ resource "kubernetes_persistent_volume" "ollama-pv" {
  }
}
-resource "helm_release" "ollama" {
-  namespace  = "ollama"
-  name       = "ollama"
-  repository = "https://otwld.github.io/ollama-helm/"
-  chart      = "ollama"
-  atomic     = true
-  values     = [templatefile("${path.module}/values.yaml", {})]
-  timeout    = 2400
-}
+# resource "helm_release" "ollama" {
+#   namespace  = "ollama"
+#   name       = "ollama"
+#   repository = "https://otwld.github.io/ollama-helm/"
+#   chart      = "ollama"
+#   atomic     = true
+#   values     = [templatefile("${path.module}/values.yaml", {})]
+#   timeout    = 2400
+# }
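Both the retired chart release and the hand-rolled resources below target the `ollama` namespace, which this commit assumes already exists. A minimal sketch of managing it from the same module (hypothetical, not part of this commit):

resource "kubernetes_namespace" "ollama" {
  metadata {
    name = "ollama"
  }
}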
resource "kubernetes_deployment" "ollama" {
metadata {
name = "ollama"
namespace = "ollama"
labels = {
app = "ollama"
}
}
spec {
replicas = 1
selector {
match_labels = {
app = "ollama"
}
}
template {
metadata {
labels = {
app = "ollama"
}
}
spec {
container {
image = "ollama/ollama:latest"
name = "ollama"
env {
name = "OLLAMA_HOST"
value = "0.0.0.0:11434"
}
env {
name = "PATH"
value = "/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
}
env {
name = "OLLAMA_KEEP_ALIVE"
value = "1h"
}
port {
container_port = 11434
}
volume_mount {
name = "ollama-data"
mount_path = "/root/.ollama"
}
resources {
limits = {
"nvidia.com/gpu" = "1"
}
}
}
volume {
name = "ollama-data"
nfs {
path = "/mnt/main/ollama"
server = "10.0.10.15"
}
}
}
}
}
}
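The "nvidia.com/gpu" limit above is only schedulable if the cluster runs NVIDIA's device plugin to advertise GPUs to the scheduler. A hedged sketch, assuming the upstream chart at https://nvidia.github.io/k8s-device-plugin (not part of this commit):

resource "helm_release" "nvidia_device_plugin" {
  name       = "nvidia-device-plugin"
  namespace  = "kube-system"
  repository = "https://nvidia.github.io/k8s-device-plugin"
  chart      = "nvidia-device-plugin"
}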
resource "kubernetes_service" "ollama" {
metadata {
name = "ollama"
namespace = "ollama"
labels = {
app = "ollama"
}
}
spec {
selector = {
app = "ollama"
}
port {
name = "http"
port = 11434
}
}
}
# Allow external apps to connect to ollama.
module "ollama-ingress" {
  source                  = "../ingress_factory"
  namespace               = "ollama"
  name                    = "ollama-server"
  service_name            = "ollama"
  root_domain             = "viktorbarzin.lan"
  tls_secret_name         = var.tls_secret_name
  allow_local_access_only = true
  ssl_redirect            = false
  port                    = 11434
}
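Inside the cluster the service resolves at its namespaced DNS name, which is what the web UI hunk below switches OLLAMA_BASE_URL to. A hypothetical convenience output (not part of this commit) could surface that address:

output "ollama_base_url" {
  # Fully qualified in-cluster address of the kubernetes_service above.
  value = "http://ollama.ollama.svc.cluster.local:11434"
}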
# Web UI
@@ -86,7 +183,7 @@ resource "kubernetes_deployment" "ollama-ui" {
          name = "ollama-ui"
          env {
            name  = "OLLAMA_BASE_URL"
-           value = "http://ollama:11434"
+           value = "http://ollama.ollama.svc.cluster.local:11434"
          }
          port {

values.yaml

@@ -1,7 +1,7 @@
ollama:
  gpu:
    # -- Enable GPU integration
-    enabled: false
+    enabled: true
    # -- GPU type: 'nvidia' or 'amd'
    type: "nvidia"