openclaw: switch primary to ChatGPT Plus OAuth (openai-codex/gpt-5.4-mini)

Bumps image 2026.2.26 → 2026.5.4 (the openai-codex provider plugin landed
in 2026.4.21+). The auth profile is OAuth via the device-pairing flow
against the Codex backend (account ancaelena98@gmail.com); the token
persists in /home/node/.openclaw/agents/main/agent/auth-state.json on NFS,
so it survives pod restarts. The Plus tier accepts gpt-5.4-mini
(1,200–7,000 local msgs/5h); gpt-5-mini and gpt-5.1-codex-mini both error
on Plus (model_not_found / "not supported with ChatGPT account"), so we
pin gpt-5.4-mini explicitly. doctor --fix auto-promotes the highest-tier
model (gpt-5-pro) after model discovery, so the container command pins the
mini back as the default after doctor runs but before the gateway starts.
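The NFS wiring that keeps auth-state.json across restarts is outside this
diff. A minimal Terraform sketch of how it could look, assuming an
NFS-backed PersistentVolumeClaim; the names "openclaw-state" and
"openclaw-data" are illustrative, not taken from this repo:

  volume {
    name = "openclaw-state"
    persistent_volume_claim {
      claim_name = "openclaw-data"  # hypothetical NFS-backed PVC
    }
  }

  container {
    # ...
    volume_mount {
      name       = "openclaw-state"
      mount_path = "/home/node/.openclaw"  # auth-state.json sits under agents/main/agent/
    }
  }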
Viktor Barzin 2026-05-06 22:06:32 +00:00
parent 574cdf08d2
commit 115ca184ff

@@ -131,8 +131,12 @@ resource "kubernetes_config_map" "openclaw_config" {
         mode = "off"
       }
       model = {
-        primary = "nim/qwen/qwen3-coder-480b-a35b-instruct"
-        fallbacks = ["nim/mistralai/mistral-large-3-675b-instruct-2512", "modelrelay/auto-fastest"]
+        # ChatGPT Plus OAuth via openai-codex plugin (account: ancaelena98@gmail.com).
+        # gpt-5.4-mini is the only mini variant the Codex backend accepts for Plus tier;
+        # gpt-5-mini / gpt-5.1-codex-mini return model_not_found / "not supported with
+        # ChatGPT account". Plus rate-card: 1,200–7,000 local msgs / 5h on gpt-5.4-mini.
+        primary = "openai-codex/gpt-5.4-mini"
+        fallbacks = ["openai-codex/gpt-5.5", "nim/qwen/qwen3-coder-480b-a35b-instruct", "modelrelay/auto-fastest"]
       }
       models = {
         "modelrelay/auto-fastest" = {}
@@ -146,6 +150,8 @@ resource "kubernetes_config_map" "openclaw_config" {
         "llama-as-openai/Llama-4-Scout-17B-16E-Instruct-FP8" = {}
         "openrouter/stepfun/step-3.5-flash:free" = {}
         "openrouter/arcee-ai/trinity-large-preview:free" = {}
+        "openai-codex/gpt-5.4-mini" = {}
+        "openai-codex/gpt-5.5" = {}
       }
     }
   }
@@ -383,8 +389,10 @@ resource "kubernetes_deployment" "openclaw" {
         # Main container: OpenClaw
         container {
           name = "openclaw"
-          image = "ghcr.io/openclaw/openclaw:2026.2.26"
-          command = ["sh", "-c", "node openclaw.mjs doctor --fix 2>/dev/null; exec node openclaw.mjs gateway --allow-unconfigured --bind lan"]
+          image = "ghcr.io/openclaw/openclaw:2026.5.4"
+          # Doctor --fix auto-promotes the highest-tier codex model (gpt-5-pro) after
+          # auth-profile-based model discovery; pin gpt-5.4-mini back to default after it.
+          command = ["sh", "-c", "node openclaw.mjs doctor --fix 2>/dev/null; node openclaw.mjs models set openai-codex/gpt-5.4-mini 2>/dev/null; exec node openclaw.mjs gateway --allow-unconfigured --bind lan"]
           port {
             container_port = 18789
           }