Add Claude commands, remote-exec script, and skills
- commands/remote.md for remote execution - remote-exec.sh script - skills: claudeception, kubernetes-latest-tag-image-pull, react-hooks-order-early-return
This commit is contained in:
parent
09631b3530
commit
fde360dbdc
44 changed files with 3053 additions and 0 deletions
55
dot_claude/commands/remote.md
Normal file
55
dot_claude/commands/remote.md
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
name: remote
|
||||
description: Execute commands on the remote dev VM (10.0.10.10)
|
||||
---
|
||||
|
||||
# Remote Command Execution Skill
|
||||
|
||||
Execute commands on the remote host via the remote-exec daemon.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The daemon must be running in a terminal (outside sandbox):
|
||||
```bash
|
||||
~/.claude/remote-exec.sh daemon
|
||||
```
|
||||
|
||||
## Instructions
|
||||
|
||||
When the user invokes `/remote <command>`:
|
||||
|
||||
1. Generate a unique filename: `cmd-$(date +%s%N).txt`
|
||||
2. Write the command to `~/.claude/remote-commands/<filename>`
|
||||
3. Wait for result in `~/.claude/remote-results/<filename>`
|
||||
4. Display the result (ends with `---` and `EXIT_CODE: <n>`)
|
||||
|
||||
## Usage Examples
|
||||
|
||||
```bash
|
||||
/remote pytest tests/ -v
|
||||
/remote python main.py dump-listings
|
||||
/remote "cd frontend && npm run build"
|
||||
```
|
||||
|
||||
## Parallel Execution
|
||||
|
||||
For parallel commands, the first line of the command file should be `--parallel`:
|
||||
```
|
||||
--parallel
|
||||
pytest tests/unit/
|
||||
pytest tests/integration/
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Default host/workdir is built into the script. To change:
|
||||
```bash
|
||||
~/.claude/remote-exec.sh config <host> <workdir>
|
||||
```
|
||||
|
||||
## Execution Steps
|
||||
|
||||
1. Write command to `~/.claude/remote-commands/cmd-<timestamp>.txt`
|
||||
2. Poll `~/.claude/remote-results/` every 0.5s (timeout: 120s)
|
||||
3. Once EXIT_CODE appears, display output
|
||||
4. Report success/failure based on exit code
|
||||
344
dot_claude/executable_remote-exec.sh
Normal file
344
dot_claude/executable_remote-exec.sh
Normal file
|
|
@ -0,0 +1,344 @@
|
|||
#!/bin/bash
# Simple Remote Execution Daemon
#
# Usage:
#   remote-exec.sh daemon                    - Start the daemon
#   remote-exec.sh status                    - Show status and test connection
#   remote-exec.sh config <host>             - Set default host
#   remote-exec.sh <workdir> exec <command>  - Execute and wait for result
#   remote-exec.sh <workdir> run <command>   - Queue command (async)
#
# Each command file carries its own workdir, so a single daemon can serve
# commands targeting different directories.
#
# Results are written to ~/.claude/remote-results/<filename>

# NOTE: -e is intentionally absent — the daemon loop must survive failing
# remote commands.  -u and pipefail still catch unset vars and broken pipes.
set -uo pipefail

# On-disk queue layout (everything lives under ~/.claude).
BASE_DIR="$HOME/.claude"
CONFIG_FILE="$BASE_DIR/remote-config"     # sourceable shell snippet: HOST="..."
COMMANDS_DIR="$BASE_DIR/remote-commands"  # inbox: one file per queued command
RESULTS_DIR="$BASE_DIR/remote-results"    # outbox: one result file per command
POLL_INTERVAL=0.5                         # seconds between daemon scans
MAX_RESULTS=50                            # results beyond this count get pruned

# ANSI color escapes for terminal output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers — diagnostics go to stderr; error() terminates the script.
error() { echo -e "${RED}Error: $1${NC}" >&2; exit 1; }
info()  { echo -e "${GREEN}$1${NC}" >&2; }
warn()  { echo -e "${YELLOW}$1${NC}" >&2; }

# Make sure the queue directories exist before any subcommand runs.
mkdir -p "$COMMANDS_DIR" "$RESULTS_DIR"

# Fallback host used until `config` writes a config file.
DEFAULT_HOST="10.0.10.10"
|
||||
|
||||
# Load the user config (if present) and resolve HOST.
# Globals: CONFIG_FILE (read), DEFAULT_HOST (read), HOST (written)
get_config() {
  # The config file is a plain shell snippet (HOST="...") written by cmd_config.
  [[ -f "$CONFIG_FILE" ]] && source "$CONFIG_FILE"
  # Fall back to the built-in default when neither env nor config set HOST.
  : "${HOST:=$DEFAULT_HOST}"
}
|
||||
|
||||
# Print the full usage text to stdout and terminate with status 1.
usage() {
  cat << 'EOF'
Simple Remote Execution Daemon

Usage:
  remote-exec.sh daemon                     Start the daemon
  remote-exec.sh status                     Show status
  remote-exec.sh config <host>              Set default host
  remote-exec.sh <workdir> exec <command>   Execute and wait
  remote-exec.sh <workdir> run <command>    Queue command (async)

Examples:
  remote-exec.sh daemon
  remote-exec.sh /home/user/project exec "pytest tests/ -v"
  remote-exec.sh /home/user/project run "python main.py dump-listings"
  remote-exec.sh config 10.0.10.10

Command files: Just drop a file in ~/.claude/remote-commands/
  - Content is the command(s) to run
  - Use "--parallel" on first line for parallel execution
EOF
  exit 1
}
|
||||
|
||||
# Persist the default host and smoke-test SSH connectivity to it.
# Globals: CONFIG_FILE (written)
# Arguments: $1 - hostname or IP to save as HOST
cmd_config() {
  local host="$1"

  # Write a sourceable shell snippet that get_config() reads back.
  printf 'HOST="%s"\n' "$host" > "$CONFIG_FILE"
  info "Config saved: host=$host"

  # Non-interactive probe: BatchMode avoids hanging on a password prompt.
  echo -n "Testing SSH connection... "
  if ssh -o ConnectTimeout=5 -o BatchMode=yes "$host" "echo ok" >/dev/null 2>&1; then
    info "OK"
  else
    warn "SSH test failed"
  fi
}
|
||||
|
||||
# Print queue/daemon status and probe SSH reachability of the remote host.
# Globals: HOST, WORKDIR, COMMANDS_DIR, RESULTS_DIR, color vars (read)
cmd_status() {
  get_config

  printf '%s\n' \
    "Remote Executor Status" \
    "  Host: $HOST" \
    "  Workdir: $WORKDIR" \
    "  Commands dir: $COMMANDS_DIR" \
    "  Results dir: $RESULTS_DIR"

  # Count queued command files and cached result files.
  local pending results
  pending=$(ls -1 "$COMMANDS_DIR"/*.txt 2>/dev/null | wc -l | tr -d ' ')
  results=$(ls -1 "$RESULTS_DIR"/*.txt 2>/dev/null | wc -l | tr -d ' ')
  printf '%s\n' "  Pending commands: $pending" "  Cached results: $results"

  echo -n "  SSH connection: "
  if ssh -o ConnectTimeout=3 -o BatchMode=yes "$HOST" "echo ok" >/dev/null 2>&1; then
    echo -e "${GREEN}OK${NC}"
  else
    echo -e "${RED}FAILED${NC}"
  fi

  # Best-effort check whether a daemon process is alive on this machine.
  echo -n "  Daemon: "
  if pgrep -f "remote-exec.sh daemon" >/dev/null 2>&1; then
    echo -e "${GREEN}running${NC}"
  else
    echo -e "${YELLOW}not running${NC}"
  fi
}
|
||||
|
||||
# Queue a command for the daemon (async) and print its id to stdout.
# Globals: COMMANDS_DIR, WORKDIR (read)
# Arguments: all args are joined into the command line to run remotely
cmd_run() {
  local joined="$*"
  local id
  id="cmd-$(date +%s%N)"   # nanosecond timestamp keeps ids unique

  # File format the daemon consumes: first line "WORKDIR:<path>",
  # remaining lines are the command text.
  printf 'WORKDIR:%s\n%s\n' "$WORKDIR" "$joined" > "$COMMANDS_DIR/$id.txt"
  echo "$id"
}
|
||||
|
||||
# Queue a command and block until the daemon writes a complete result,
# then print the result file to stdout.
# Globals: RESULTS_DIR (read); uses cmd_run/error defined in this script.
cmd_exec() {
  local joined="$*"

  # Split declaration from assignment so a cmd_run failure isn't masked.
  local id
  id=$(cmd_run "$joined")
  local result="$RESULTS_DIR/$id.txt"

  # One tick per 0.5s sleep → 240 ticks is roughly a 120 second budget,
  # shared across both wait phases below.
  local max_ticks=240
  local ticks=0

  # Phase 1: wait for the result file to appear at all.
  while [[ ! -f "$result" ]] && (( ticks < max_ticks )); do
    sleep 0.5
    ticks=$((ticks + 1))
  done

  [[ -f "$result" ]] || error "Timeout waiting for result (is daemon running?)"

  # Phase 2: wait until the daemon has appended the trailing EXIT_CODE line,
  # which marks the result as complete.
  while ! grep -q "^EXIT_CODE:" "$result" 2>/dev/null; do
    sleep 0.5
    ticks=$((ticks + 1))
    if (( ticks >= max_ticks )); then
      error "Timeout waiting for command completion"
    fi
  done

  cat "$result"
}
|
||||
|
||||
# Main daemon loop: poll COMMANDS_DIR for command files, run each on $HOST
# over SSH (in the command's own workdir), and write header + output +
# EXIT_CODE marker to RESULTS_DIR.  Runs until interrupted.
# Globals: HOST, WORKDIR (fallback workdir), COMMANDS_DIR, RESULTS_DIR,
#          POLL_INTERVAL, MAX_RESULTS, color vars (read)
run_daemon() {
  get_config

  echo -e "${BLUE}Remote Execution Daemon${NC}"
  echo "  Host: $HOST"
  echo "  Workdir: (per-command)"
  echo "  Commands dir: $COMMANDS_DIR"
  echo "  Results dir: $RESULTS_DIR"
  echo "  Polling every ${POLL_INTERVAL}s"
  echo ""

  # Up-front SSH connectivity check so misconfiguration shows immediately
  # instead of surfacing as per-command failures later.
  echo "Testing SSH connection..."
  echo "  Host: $HOST"

  SSH_OUTPUT=$(ssh -v -o ConnectTimeout=5 -o BatchMode=yes "$HOST" "echo ok" 2>&1)
  SSH_EXIT=$?

  if [[ $SSH_EXIT -eq 0 ]]; then
    echo -e "  SSH: ${GREEN}OK${NC}"
  else
    echo -e "  SSH: ${RED}FAILED (exit code: $SSH_EXIT)${NC}"
    echo "  --- SSH Debug Output ---"
    # Show only the authentication/connection lines from ssh -v output.
    echo "$SSH_OUTPUT" | grep -E "(debug1: Authentications|debug1: Trying|debug1: Authentication|Permission denied|Connection refused|No route|Could not resolve)" | head -20
    echo "  -------------------------"
    warn "SSH connection failed - commands will fail until this is fixed"
    echo ""
    echo "Troubleshooting:"
    echo "  1. Check SSH agent: ssh-add -l"
    echo "  2. Test manually: ssh $HOST hostname"
    echo "  3. Check key permissions: ls -la ~/.ssh/"
  fi
  echo ""
  echo "Waiting for commands..."

  while true; do
    for cmd_file in "$COMMANDS_DIR"/*.txt; do
      [[ -e "$cmd_file" ]] || continue   # unmatched glob leaves the literal pattern

      filename=$(basename "$cmd_file" .txt)
      result_file="$RESULTS_DIR/$filename.txt"

      # First line may carry the workdir (format: WORKDIR:/path); otherwise
      # fall back to the global WORKDIR.
      first_line=$(head -n 1 "$cmd_file")
      if [[ "$first_line" == WORKDIR:* ]]; then
        CMD_WORKDIR="${first_line#WORKDIR:}"
        remaining=$(tail -n +2 "$cmd_file")
      else
        CMD_WORKDIR="$WORKDIR"
        remaining=$(cat "$cmd_file")
      fi

      # A leading "--parallel" line switches to fan-out execution.
      first_remaining=$(echo "$remaining" | head -n 1)
      if [[ "$first_remaining" == "--parallel" ]]; then
        PARALLEL=true
        commands=$(echo "$remaining" | tail -n +2)
      else
        PARALLEL=false
        commands="$remaining"
      fi

      # Short one-line preview for the daemon's own log.
      cmd_preview=$(echo "$commands" | head -n 1 | head -c 60)
      if [[ "$PARALLEL" == "true" ]]; then
        echo -n "[$(date '+%H:%M:%S')] [parallel] $cmd_preview... "
      else
        echo -n "[$(date '+%H:%M:%S')] $cmd_preview... "
      fi

      # Execute and capture everything (header + output + EXIT_CODE) into
      # the result file.  NOTE(review): ssh -v interleaves debug chatter
      # into the captured result; presumably intentional for diagnostics —
      # drop the -v if results should contain command output only.
      {
        echo "=== Remote Execution ==="
        echo "Host: $HOST"
        echo "Workdir: $CMD_WORKDIR"
        echo "Command: $commands"
        echo "========================"
        echo ""

        if [[ "$PARALLEL" == "true" ]]; then
          # Fan out: one ssh per line, then barrier on all PIDs; any
          # failing job makes the whole batch report exit 1.
          PIDS=()
          while IFS= read -r cmd || [[ -n "$cmd" ]]; do
            [[ -z "$cmd" ]] && continue
            ssh -v -o BatchMode=yes "$HOST" "cd '$CMD_WORKDIR' && $cmd" 2>&1 &
            PIDS+=($!)
          done <<< "$commands"

          FINAL_EXIT=0
          for pid in "${PIDS[@]}"; do
            if ! wait "$pid"; then
              FINAL_EXIT=1
            fi
          done
          echo ""
          echo "---"
          echo "EXIT_CODE: $FINAL_EXIT"
        else
          # Sequential: run every line even if an earlier one fails, and
          # report the last failing command's real exit code.
          # BUG FIX: the original used `if ! ssh ...; then FINAL_EXIT=$?`,
          # but inside that then-branch $? is the status of the negated
          # pipeline (always 0), so failures were recorded as EXIT_CODE: 0.
          # `|| FINAL_EXIT=$?` captures the command's actual status.
          FINAL_EXIT=0
          while IFS= read -r cmd || [[ -n "$cmd" ]]; do
            [[ -z "$cmd" ]] && continue
            ssh -v -o BatchMode=yes "$HOST" "cd '$CMD_WORKDIR' && $cmd" 2>&1 || FINAL_EXIT=$?
          done <<< "$commands"
          echo ""
          echo "---"
          echo "EXIT_CODE: $FINAL_EXIT"
        fi
      } > "$result_file" 2>&1

      rm "$cmd_file"

      # One-line ok/failed status for the daemon log, parsed back out of
      # the result file we just wrote.
      exit_code=$(grep "^EXIT_CODE:" "$result_file" | tail -1 | cut -d' ' -f2)
      if [[ "$exit_code" == "0" ]]; then
        echo -e "${GREEN}ok${NC}"
      else
        echo -e "${RED}failed (exit: $exit_code)${NC}"
      fi

      # Prune: keep only the newest MAX_RESULTS result files.
      result_count=$(ls -1 "$RESULTS_DIR" 2>/dev/null | wc -l)
      if [[ $result_count -gt $MAX_RESULTS ]]; then
        ls -1t "$RESULTS_DIR" | tail -n +$((MAX_RESULTS + 1)) | while read -r old; do
          rm -f "$RESULTS_DIR/$old"
        done
      fi
    done

    sleep "$POLL_INTERVAL"
  done
}
|
||||
|
||||
# --- Main dispatch ---------------------------------------------------------
# Workdir-less subcommands (config/daemon/status/help) are handled first;
# anything else must be "<workdir> <action> <command...>".
[[ $# -ge 1 ]] || usage

case "$1" in
  config|setup)
    shift
    [[ $# -ge 1 ]] || error "Usage: remote-exec.sh config <host>"
    cmd_config "$1"
    exit 0
    ;;
  daemon|start)
    get_config
    WORKDIR=""   # the daemon reads the workdir from each command file
    run_daemon
    exit 0
    ;;
  status|info)
    get_config
    WORKDIR="(per-command)"
    cmd_status
    exit 0
    ;;
  help|--help|-h)
    usage
    ;;
esac

# exec/run require a workdir as the first positional argument.
[[ $# -ge 2 ]] || usage

WORKDIR="$1"
shift
ACTION="$1"
shift

get_config

case "$ACTION" in
  run|queue|async)
    [[ $# -ge 1 ]] || error "Usage: remote-exec.sh <workdir> run <command>"
    cmd_run "$@"
    ;;
  exec|execute|sync)
    [[ $# -ge 1 ]] || error "Usage: remote-exec.sh <workdir> exec <command>"
    cmd_exec "$@"
    ;;
  *)
    error "Unknown action: $ACTION"
    ;;
esac
|
||||
21
dot_claude/skills/claudeception/LICENSE
Normal file
21
dot_claude/skills/claudeception/LICENSE
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2024 Claude Code
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
187
dot_claude/skills/claudeception/README.md
Normal file
187
dot_claude/skills/claudeception/README.md
Normal file
|
|
@ -0,0 +1,187 @@
|
|||
# Claudeception
|
||||
|
||||
Every time you use an AI coding agent, it starts from zero. You spend an hour debugging some obscure error, the agent figures it out, session ends. Next time you hit the same issue? Another hour.
|
||||
|
||||
This skill fixes that. When Claude Code discovers something non-obvious (a debugging technique, a workaround, some project-specific pattern), it saves that knowledge as a new skill. Next time a similar problem comes up, the skill gets loaded automatically.
|
||||
|
||||
## Installation
|
||||
|
||||
### Step 1: Clone the skill
|
||||
|
||||
**User-level (recommended)**
|
||||
|
||||
```bash
|
||||
git clone https://github.com/blader/Claudeception.git ~/.claude/skills/claudeception
|
||||
```
|
||||
|
||||
**Project-level**
|
||||
|
||||
```bash
|
||||
git clone https://github.com/blader/Claudeception.git .claude/skills/claudeception
|
||||
```
|
||||
|
||||
### Step 2: Set up the activation hook (recommended)
|
||||
|
||||
The skill can activate via semantic matching, but a hook ensures it evaluates every session for extractable knowledge.
|
||||
|
||||
#### User-level setup (recommended)
|
||||
|
||||
1. Create the hooks directory and copy the script:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.claude/hooks
|
||||
cp ~/.claude/skills/claudeception/scripts/claudeception-activator.sh ~/.claude/hooks/
|
||||
chmod +x ~/.claude/hooks/claudeception-activator.sh
|
||||
```
|
||||
|
||||
2. Add the hook to your global Claude settings (`~/.claude/settings.json`):
|
||||
|
||||
```json
|
||||
{
|
||||
"hooks": {
|
||||
"UserPromptSubmit": [
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "~/.claude/hooks/claudeception-activator.sh"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Project-level setup
|
||||
|
||||
1. Create the hooks directory inside your project and copy the script:
|
||||
|
||||
```bash
|
||||
mkdir -p .claude/hooks
|
||||
cp .claude/skills/claudeception/scripts/claudeception-activator.sh .claude/hooks/
|
||||
chmod +x .claude/hooks/claudeception-activator.sh
|
||||
```
|
||||
|
||||
2. Add the hook to your project settings (`.claude/settings.json` in the repo):
|
||||
|
||||
```json
|
||||
{
|
||||
"hooks": {
|
||||
"UserPromptSubmit": [
|
||||
{
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": ".claude/hooks/claudeception-activator.sh"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you already have a `settings.json`, merge the `hooks` configuration into it.
|
||||
|
||||
The hook injects a reminder on every prompt that tells Claude to evaluate whether the current task produced extractable knowledge. This achieves higher activation rates than relying on semantic description matching alone.
|
||||
|
||||
## Usage
|
||||
|
||||
### Automatic Mode
|
||||
|
||||
The skill activates automatically when Claude Code:
|
||||
- Just completed debugging and discovered a non-obvious solution
|
||||
- Found a workaround through investigation or trial-and-error
|
||||
- Resolved an error where the root cause wasn't immediately apparent
|
||||
- Learned project-specific patterns or configurations through investigation
|
||||
- Completed any task where the solution required meaningful discovery
|
||||
|
||||
### Explicit Mode
|
||||
|
||||
Trigger a learning retrospective:
|
||||
|
||||
```
|
||||
/claudeception
|
||||
```
|
||||
|
||||
Or explicitly request skill extraction:
|
||||
|
||||
```
|
||||
Save what we just learned as a skill
|
||||
```
|
||||
|
||||
### What Gets Extracted
|
||||
|
||||
Not every task produces a skill. It only extracts knowledge that required actual discovery (not just reading docs), will help with future tasks, has clear trigger conditions, and has been verified to work.
|
||||
|
||||
## Research
|
||||
|
||||
The idea comes from academic work on skill libraries for AI agents.
|
||||
|
||||
[Voyager](https://arxiv.org/abs/2305.16291) (Wang et al., 2023) showed that game-playing agents can build up libraries of reusable skills over time, and that this helps them avoid re-learning things they already figured out. [CASCADE](https://arxiv.org/abs/2512.23880) (2024) introduced "meta-skills" (skills for acquiring skills), which is the approach this skill takes. [SEAgent](https://arxiv.org/abs/2508.04700) (2025) showed agents can learn new software environments through trial and error, which inspired the retrospective feature. [Reflexion](https://arxiv.org/abs/2303.11366) (Shinn et al., 2023) showed that self-reflection improves agent performance on subsequent attempts.
|
||||
|
||||
Agents that persist what they learn do better than agents that start fresh.
|
||||
|
||||
## How It Works
|
||||
|
||||
Claude Code has a native skills system. At startup, it loads skill names and descriptions (about 100 tokens each). When you're working, it matches your current context against those descriptions and pulls in relevant skills.
|
||||
|
||||
But this retrieval system can be written to, not just read from. So when this skill notices extractable knowledge, it writes a new skill with a description optimized for future retrieval.
|
||||
|
||||
The description matters a lot. "Helps with database problems" won't match anything useful. "Fix for PrismaClientKnownRequestError in serverless" will match when someone hits that error.
|
||||
|
||||
More on the skills architecture [here](https://www.anthropic.com/engineering/equipping-agents-for-the-real-world-with-agent-skills).
|
||||
|
||||
## Skill Format
|
||||
|
||||
Extracted skills are markdown files with YAML frontmatter:
|
||||
|
||||
```yaml
|
||||
---
|
||||
name: prisma-connection-pool-exhaustion
|
||||
description: |
|
||||
Fix for PrismaClientKnownRequestError: Too many database connections
|
||||
in serverless environments (Vercel, AWS Lambda). Use when connection
|
||||
count errors appear after ~5 concurrent requests.
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: 2024-01-15
|
||||
---
|
||||
|
||||
# Prisma Connection Pool Exhaustion
|
||||
|
||||
## Problem
|
||||
[What this skill solves]
|
||||
|
||||
## Context / Trigger Conditions
|
||||
[Exact error messages, symptoms, scenarios]
|
||||
|
||||
## Solution
|
||||
[Step-by-step fix]
|
||||
|
||||
## Verification
|
||||
[How to confirm it worked]
|
||||
```
|
||||
|
||||
See `resources/skill-template.md` for the full template.
|
||||
|
||||
## Quality Gates
|
||||
|
||||
The skill is picky about what it extracts. If something is just a documentation lookup, or only useful for this one case, or hasn't actually been tested, it won't create a skill. Would this actually help someone who hits this problem in six months? If not, no skill.
|
||||
|
||||
## Examples
|
||||
|
||||
See `examples/` for sample skills:
|
||||
|
||||
- `nextjs-server-side-error-debugging/`: errors that don't show in browser console
|
||||
- `prisma-connection-pool-exhaustion/`: the "too many connections" serverless problem
|
||||
- `typescript-circular-dependency/`: detecting and fixing import cycles
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome. Fork, make changes, submit a PR.
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
389
dot_claude/skills/claudeception/SKILL.md
Normal file
389
dot_claude/skills/claudeception/SKILL.md
Normal file
|
|
@ -0,0 +1,389 @@
|
|||
---
|
||||
name: claudeception
|
||||
description: |
|
||||
Claudeception is a continuous learning system that extracts reusable knowledge from work sessions.
|
||||
Triggers: (1) /claudeception command to review session learnings, (2) "save this as a skill"
|
||||
or "extract a skill from this", (3) "what did we learn?", (4) After any task involving
|
||||
non-obvious debugging, workarounds, or trial-and-error discovery. Creates new Claude Code
|
||||
skills when valuable, reusable knowledge is identified.
|
||||
author: Claude Code
|
||||
version: 3.0.0
|
||||
allowed-tools:
|
||||
- Read
|
||||
- Write
|
||||
- Edit
|
||||
- Grep
|
||||
- Glob
|
||||
- WebSearch
|
||||
- WebFetch
|
||||
- Skill
|
||||
- AskUserQuestion
|
||||
- TodoWrite
|
||||
---
|
||||
|
||||
# Claudeception
|
||||
|
||||
You are Claudeception: a continuous learning system that extracts reusable knowledge from work sessions and
|
||||
codifies it into new Claude Code skills. This enables autonomous improvement over time.
|
||||
|
||||
## Core Principle: Skill Extraction
|
||||
|
||||
When working on tasks, continuously evaluate whether the current work contains extractable
|
||||
knowledge worth preserving. Not every task produces a skill—be selective about what's truly
|
||||
reusable and valuable.
|
||||
|
||||
## When to Extract a Skill
|
||||
|
||||
Extract a skill when you encounter:
|
||||
|
||||
1. **Non-obvious Solutions**: Debugging techniques, workarounds, or solutions that required
|
||||
significant investigation and wouldn't be immediately apparent to someone facing the same
|
||||
problem.
|
||||
|
||||
2. **Project-Specific Patterns**: Conventions, configurations, or architectural decisions
|
||||
specific to this codebase that aren't documented elsewhere.
|
||||
|
||||
3. **Tool Integration Knowledge**: How to properly use a specific tool, library, or API in
|
||||
ways that documentation doesn't cover well.
|
||||
|
||||
4. **Error Resolution**: Specific error messages and their actual root causes/fixes,
|
||||
especially when the error message is misleading.
|
||||
|
||||
5. **Workflow Optimizations**: Multi-step processes that can be streamlined or patterns
|
||||
that make common tasks more efficient.
|
||||
|
||||
## Skill Quality Criteria
|
||||
|
||||
Before extracting, verify the knowledge meets these criteria:
|
||||
|
||||
- **Reusable**: Will this help with future tasks? (Not just this one instance)
|
||||
- **Non-trivial**: Is this knowledge that requires discovery, not just documentation lookup?
|
||||
- **Specific**: Can you describe the exact trigger conditions and solution?
|
||||
- **Verified**: Has this solution actually worked, not just theoretically?
|
||||
|
||||
## Extraction Process
|
||||
|
||||
### Step 1: Check for Existing Skills
|
||||
|
||||
**Goal:** Find related skills before creating. Decide: update or create new.
|
||||
|
||||
```sh
|
||||
# Skill directories (project-first, then user-level)
|
||||
SKILL_DIRS=(
|
||||
".claude/skills"
|
||||
"$HOME/.claude/skills"
|
||||
"$HOME/.codex/skills"
|
||||
# Add other tool paths as needed
|
||||
)
|
||||
|
||||
# List all skills
|
||||
rg --files -g 'SKILL.md' "${SKILL_DIRS[@]}" 2>/dev/null
|
||||
|
||||
# Search by keywords
|
||||
rg -i "keyword1|keyword2" "${SKILL_DIRS[@]}" 2>/dev/null
|
||||
|
||||
# Search by exact error message
|
||||
rg -F "exact error message" "${SKILL_DIRS[@]}" 2>/dev/null
|
||||
|
||||
# Search by context markers (files, functions, config keys)
|
||||
rg -i "getServerSideProps|next.config.js|prisma.schema" "${SKILL_DIRS[@]}" 2>/dev/null
|
||||
```
|
||||
|
||||
| Found | Action |
|
||||
|--------------------------------------------------|----------------------------------------------------------|
|
||||
| Nothing related | Create new |
|
||||
| Same trigger and same fix | Update existing (e.g., `version: 1.0.0` → `1.1.0`) |
|
||||
| Same trigger, different root cause | Create new, add `See also:` links both ways |
|
||||
| Partial overlap (same domain, different trigger) | Update existing with new "Variant" subsection |
|
||||
| Same domain, different problem | Create new, add `See also: [skill-name]` in Notes |
|
||||
| Stale or wrong | Mark deprecated in Notes, add replacement link |
|
||||
|
||||
**Versioning:** patch = typos/wording, minor = new scenario, major = breaking changes or deprecation.
|
||||
|
||||
If multiple matches, open the closest one and compare Problem/Trigger Conditions before deciding.
|
||||
|
||||
### Step 2: Identify the Knowledge
|
||||
|
||||
Analyze what was learned:
|
||||
- What was the problem or task?
|
||||
- What was non-obvious about the solution?
|
||||
- What would someone need to know to solve this faster next time?
|
||||
- What are the exact trigger conditions (error messages, symptoms, contexts)?
|
||||
|
||||
### Step 3: Research Best Practices (When Appropriate)
|
||||
|
||||
Before creating the skill, search the web for current information when:
|
||||
|
||||
**Always search for:**
|
||||
- Technology-specific best practices (frameworks, libraries, tools)
|
||||
- Current documentation or API changes
|
||||
- Common patterns or solutions for similar problems
|
||||
- Known gotchas or pitfalls in the problem domain
|
||||
- Alternative approaches or solutions
|
||||
|
||||
**When to search:**
|
||||
- The topic involves specific technologies, frameworks, or tools
|
||||
- You're uncertain about current best practices
|
||||
- The solution might have changed after January 2025 (knowledge cutoff)
|
||||
- There might be official documentation or community standards
|
||||
- You want to verify your understanding is current
|
||||
|
||||
**When to skip searching:**
|
||||
- Project-specific internal patterns unique to this codebase
|
||||
- Solutions that are clearly context-specific and wouldn't be documented
|
||||
- Generic programming concepts that are stable and well-understood
|
||||
- Time-sensitive situations where the skill needs to be created immediately
|
||||
|
||||
**Search strategy:**
|
||||
```
|
||||
1. Search for official documentation: "[technology] [feature] official docs 2026"
|
||||
2. Search for best practices: "[technology] [problem] best practices 2026"
|
||||
3. Search for common issues: "[technology] [error message] solution 2026"
|
||||
4. Review top results and incorporate relevant information
|
||||
5. Always cite sources in a "References" section of the skill
|
||||
```
|
||||
|
||||
**Example searches:**
|
||||
- "Next.js getServerSideProps error handling best practices 2026"
|
||||
- "Claude Code skill description semantic matching 2026"
|
||||
- "React useEffect cleanup patterns official docs 2026"
|
||||
|
||||
**Integration with skill content:**
|
||||
- Add a "References" section at the end of the skill with source URLs
|
||||
- Incorporate best practices into the "Solution" section
|
||||
- Include warnings about deprecated patterns in the "Notes" section
|
||||
- Mention official recommendations where applicable
|
||||
|
||||
### Step 4: Structure the Skill
|
||||
|
||||
Create a new skill with this structure:
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: [descriptive-kebab-case-name]
|
||||
description: |
|
||||
[Precise description including: (1) exact use cases, (2) trigger conditions like
|
||||
specific error messages or symptoms, (3) what problem this solves. Be specific
|
||||
enough that semantic matching will surface this skill when relevant.]
|
||||
author: [original-author or "Claude Code"]
|
||||
version: 1.0.0
|
||||
date: [YYYY-MM-DD]
|
||||
---
|
||||
|
||||
# [Skill Name]
|
||||
|
||||
## Problem
|
||||
[Clear description of the problem this skill addresses]
|
||||
|
||||
## Context / Trigger Conditions
|
||||
[When should this skill be used? Include exact error messages, symptoms, or scenarios]
|
||||
|
||||
## Solution
|
||||
[Step-by-step solution or knowledge to apply]
|
||||
|
||||
## Verification
|
||||
[How to verify the solution worked]
|
||||
|
||||
## Example
|
||||
[Concrete example of applying this skill]
|
||||
|
||||
## Notes
|
||||
[Any caveats, edge cases, or related considerations]
|
||||
|
||||
## References
|
||||
[Optional: Links to official documentation, articles, or resources that informed this skill]
|
||||
```
|
||||
|
||||
### Step 5: Write Effective Descriptions
|
||||
|
||||
The description field is critical for skill discovery. Include:
|
||||
|
||||
- **Specific symptoms**: Exact error messages, unexpected behaviors
|
||||
- **Context markers**: Framework names, file types, tool names
|
||||
- **Action phrases**: "Use when...", "Helps with...", "Solves..."
|
||||
|
||||
Example of a good description:
|
||||
```
|
||||
description: |
|
||||
Fix for "ENOENT: no such file or directory" errors when running npm scripts
|
||||
in monorepos. Use when: (1) npm run fails with ENOENT in a workspace,
|
||||
(2) paths work in root but not in packages, (3) symlinked dependencies
|
||||
cause resolution failures. Covers node_modules resolution in Lerna,
|
||||
Turborepo, and npm workspaces.
|
||||
```
|
||||
|
||||
### Step 6: Save the Skill
|
||||
|
||||
Save new skills to the appropriate location:
|
||||
|
||||
- **Project-specific skills**: `.claude/skills/[skill-name]/SKILL.md`
|
||||
- **User-wide skills**: `~/.claude/skills/[skill-name]/SKILL.md`
|
||||
|
||||
Include any supporting scripts in a `scripts/` subdirectory if the skill benefits from
|
||||
executable helpers.
|
||||
|
||||
## Retrospective Mode
|
||||
|
||||
When `/claudeception` is invoked at the end of a session:
|
||||
|
||||
1. **Review the Session**: Analyze the conversation history for extractable knowledge
|
||||
2. **Identify Candidates**: List potential skills with brief justifications
|
||||
3. **Prioritize**: Focus on the highest-value, most reusable knowledge
|
||||
4. **Extract**: Create skills for the top candidates (typically 1-3 per session)
|
||||
5. **Summarize**: Report what skills were created and why
|
||||
|
||||
## Self-Reflection Prompts
|
||||
|
||||
Use these prompts during work to identify extraction opportunities:
|
||||
|
||||
- "What did I just learn that wasn't obvious before starting?"
|
||||
- "If I faced this exact problem again, what would I wish I knew?"
|
||||
- "What error message or symptom led me here, and what was the actual cause?"
|
||||
- "Is this pattern specific to this project, or would it help in similar projects?"
|
||||
- "What would I tell a colleague who hits this same issue?"
|
||||
|
||||
## Memory Consolidation
|
||||
|
||||
When extracting skills, also consider:
|
||||
|
||||
1. **Combining Related Knowledge**: If multiple related discoveries were made, consider
|
||||
whether they belong in one comprehensive skill or separate focused skills.
|
||||
|
||||
2. **Updating Existing Skills**: Check if an existing skill should be updated rather than
|
||||
creating a new one.
|
||||
|
||||
3. **Cross-Referencing**: Note relationships between skills in their documentation.
|
||||
|
||||
## Quality Gates
|
||||
|
||||
Before finalizing a skill, verify:
|
||||
|
||||
- [ ] Description contains specific trigger conditions
|
||||
- [ ] Solution has been verified to work
|
||||
- [ ] Content is specific enough to be actionable
|
||||
- [ ] Content is general enough to be reusable
|
||||
- [ ] No sensitive information (credentials, internal URLs) is included
|
||||
- [ ] Skill doesn't duplicate existing documentation or skills
|
||||
- [ ] Web research conducted when appropriate (for technology-specific topics)
|
||||
- [ ] References section included if web sources were consulted
|
||||
- [ ] Current best practices (post-2025) incorporated when relevant
|
||||
|
||||
## Anti-Patterns to Avoid
|
||||
|
||||
- **Over-extraction**: Not every task deserves a skill. Mundane solutions don't need preservation.
|
||||
- **Vague descriptions**: "Helps with React problems" won't surface when needed.
|
||||
- **Unverified solutions**: Only extract what actually worked.
|
||||
- **Documentation duplication**: Don't recreate official docs; link to them and add what's missing.
|
||||
- **Stale knowledge**: Mark skills with versions and dates; knowledge can become outdated.
|
||||
|
||||
## Skill Lifecycle
|
||||
|
||||
Skills should evolve:
|
||||
|
||||
1. **Creation**: Initial extraction with documented verification
|
||||
2. **Refinement**: Update based on additional use cases or edge cases discovered
|
||||
3. **Deprecation**: Mark as deprecated when underlying tools/patterns change
|
||||
4. **Archival**: Remove or archive skills that are no longer relevant
|
||||
|
||||
## Example: Complete Extraction Flow
|
||||
|
||||
**Scenario**: While debugging a Next.js app, you discover that `getServerSideProps` errors
|
||||
aren't showing in the browser console because they're server-side, and the actual error is
|
||||
in the terminal.
|
||||
|
||||
**Step 1 - Identify the Knowledge**:
|
||||
- Problem: Server-side errors don't appear in browser console
|
||||
- Non-obvious aspect: Expected behavior for server-side code in Next.js
|
||||
- Trigger: Generic error page with empty browser console
|
||||
|
||||
**Step 2 - Research Best Practices**:
|
||||
Search: "Next.js getServerSideProps error handling best practices 2026"
|
||||
- Found official docs on error handling
|
||||
- Discovered recommended patterns for try-catch in data fetching
|
||||
- Learned about error boundaries for server components
|
||||
|
||||
**Step 3-5 - Structure and Save**:
|
||||
|
||||
**Extraction**:
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: nextjs-server-side-error-debugging
|
||||
description: |
|
||||
Debug getServerSideProps and getStaticProps errors in Next.js. Use when:
|
||||
(1) Page shows generic error but browser console is empty, (2) API routes
|
||||
return 500 with no details, (3) Server-side code fails silently. Check
|
||||
terminal/server logs instead of browser for actual error messages.
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: 2024-01-15
|
||||
---
|
||||
|
||||
# Next.js Server-Side Error Debugging
|
||||
|
||||
## Problem
|
||||
Server-side errors in Next.js don't appear in the browser console, making
|
||||
debugging frustrating when you're looking in the wrong place.
|
||||
|
||||
## Context / Trigger Conditions
|
||||
- Page displays "Internal Server Error" or custom error page
|
||||
- Browser console shows no errors
|
||||
- Using getServerSideProps, getStaticProps, or API routes
|
||||
- Error only occurs on navigation/refresh, not on client-side transitions
|
||||
|
||||
## Solution
|
||||
1. Check the terminal where `npm run dev` is running—errors appear there
|
||||
2. For production, check server logs (Vercel dashboard, CloudWatch, etc.)
|
||||
3. Add try-catch with console.error in server-side functions for clarity
|
||||
4. Use Next.js error handling: return `{ notFound: true }` or `{ redirect: {...} }`
|
||||
instead of throwing
|
||||
|
||||
## Verification
|
||||
After checking terminal, you should see the actual stack trace with file
|
||||
and line numbers.
|
||||
|
||||
## Notes
|
||||
- This applies to all server-side code in Next.js, not just data fetching
|
||||
- In development, Next.js sometimes shows a modal with partial error info
|
||||
- The `next.config.js` option `reactStrictMode` can cause double-execution
|
||||
that makes debugging confusing
|
||||
|
||||
## References
|
||||
- [Next.js Data Fetching: getServerSideProps](https://nextjs.org/docs/pages/building-your-application/data-fetching/get-server-side-props)
|
||||
- [Next.js Error Handling](https://nextjs.org/docs/pages/building-your-application/routing/error-handling)
|
||||
```
|
||||
|
||||
## Integration with Workflow
|
||||
|
||||
### Automatic Trigger Conditions
|
||||
|
||||
Invoke this skill immediately after completing a task when ANY of these apply:
|
||||
|
||||
1. **Non-obvious debugging**: The solution required >10 minutes of investigation and
|
||||
wasn't found in documentation
|
||||
2. **Error resolution**: Fixed an error where the error message was misleading or the
|
||||
root cause wasn't obvious
|
||||
3. **Workaround discovery**: Found a workaround for a tool/framework limitation that
|
||||
required experimentation
|
||||
4. **Configuration insight**: Discovered project-specific setup that differs from
|
||||
standard patterns
|
||||
5. **Trial-and-error success**: Tried multiple approaches before finding what worked
|
||||
|
||||
### Explicit Invocation
|
||||
|
||||
Also invoke when:
|
||||
- User runs `/claudeception` to review the session
|
||||
- User says "save this as a skill" or similar
|
||||
- User asks "what did we learn?"
|
||||
|
||||
### Self-Check After Each Task
|
||||
|
||||
After completing any significant task, ask yourself:
|
||||
- "Did I just spend meaningful time investigating something?"
|
||||
- "Would future-me benefit from having this documented?"
|
||||
- "Was the solution non-obvious from documentation alone?"
|
||||
|
||||
If yes to any, invoke this skill immediately.
|
||||
|
||||
Remember: The goal is continuous, autonomous improvement. Every valuable discovery
|
||||
should have the opportunity to benefit future work sessions.
|
||||
54
dot_claude/skills/claudeception/WARP.md
Normal file
54
dot_claude/skills/claudeception/WARP.md
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
# WARP.md
|
||||
|
||||
This file provides guidance to WARP (warp.dev) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Claudeception is a **Claude Code skill** for continuous learning—it enables Claude Code to autonomously extract and preserve learned knowledge into reusable skills. It is not an application codebase but rather a skill definition with documentation and examples.
|
||||
|
||||
## Key Files
|
||||
|
||||
- `SKILL.md` — The main skill definition (YAML frontmatter + instructions). This is what Claude Code loads.
|
||||
- `resources/skill-template.md` — Template for creating new skills
|
||||
- `examples/` — Sample extracted skills demonstrating proper format
|
||||
|
||||
## Skill File Format
|
||||
|
||||
Skills use YAML frontmatter followed by markdown:
|
||||
|
||||
```yaml
|
||||
---
|
||||
name: kebab-case-name
|
||||
description: |
|
||||
Must be precise for semantic matching. Include:
|
||||
(1) exact use cases, (2) trigger conditions like error messages,
|
||||
(3) what problem this solves
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
allowed-tools:
|
||||
- Read
|
||||
- Write
|
||||
- Bash
|
||||
- Grep
|
||||
- Glob
|
||||
---
|
||||
```
|
||||
|
||||
The description field is critical—it determines when the skill surfaces during semantic matching.
|
||||
|
||||
## Installation Paths
|
||||
|
||||
- **User-level**: `~/.claude/skills/[skill-name]/`
|
||||
- **Project-level**: `.claude/skills/[skill-name]/`
|
||||
|
||||
## Quality Criteria for Skills
|
||||
|
||||
When modifying or creating skills, ensure:
|
||||
- **Reusable**: Helps with future tasks, not just one instance
|
||||
- **Non-trivial**: Requires discovery, not just documentation lookup
|
||||
- **Specific**: Clear trigger conditions (exact error messages, symptoms)
|
||||
- **Verified**: Solution has actually been tested and works
|
||||
|
||||
## Research Foundation
|
||||
|
||||
The approach is based on academic work on skill libraries (Voyager, CASCADE, SEAgent, Reflexion). See `resources/research-references.md` for details.
|
||||
1
dot_claude/skills/claudeception/dot_git/HEAD
Normal file
1
dot_claude/skills/claudeception/dot_git/HEAD
Normal file
|
|
@ -0,0 +1 @@
|
|||
ref: refs/heads/main
|
||||
13
dot_claude/skills/claudeception/dot_git/config
Normal file
13
dot_claude/skills/claudeception/dot_git/config
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
[core]
|
||||
repositoryformatversion = 0
|
||||
filemode = true
|
||||
bare = false
|
||||
logallrefupdates = true
|
||||
ignorecase = true
|
||||
precomposeunicode = true
|
||||
[remote "origin"]
|
||||
url = https://github.com/blader/Claudeception.git
|
||||
fetch = +refs/heads/*:refs/remotes/origin/*
|
||||
[branch "main"]
|
||||
remote = origin
|
||||
merge = refs/heads/main
|
||||
1
dot_claude/skills/claudeception/dot_git/description
Normal file
1
dot_claude/skills/claudeception/dot_git/description
Normal file
|
|
@ -0,0 +1 @@
|
|||
Unnamed repository; edit this file 'description' to name the repository.
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to check the commit log message taken by
|
||||
# applypatch from an e-mail message.
|
||||
#
|
||||
# The hook should exit with non-zero status after issuing an
|
||||
# appropriate message if it wants to stop the commit. The hook is
|
||||
# allowed to edit the commit message file.
|
||||
#
|
||||
# To enable this hook, rename this file to "applypatch-msg".
|
||||
|
||||
. git-sh-setup
|
||||
commitmsg="$(git rev-parse --git-path hooks/commit-msg)"
|
||||
test -x "$commitmsg" && exec "$commitmsg" ${1+"$@"}
|
||||
:
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to check the commit log message.
|
||||
# Called by "git commit" with one argument, the name of the file
|
||||
# that has the commit message. The hook should exit with non-zero
|
||||
# status after issuing an appropriate message if it wants to stop the
|
||||
# commit. The hook is allowed to edit the commit message file.
|
||||
#
|
||||
# To enable this hook, rename this file to "commit-msg".
|
||||
|
||||
# Uncomment the below to add a Signed-off-by line to the message.
|
||||
# Doing this in a hook is a bad idea in general, but the prepare-commit-msg
|
||||
# hook is more suited to it.
|
||||
#
|
||||
# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
|
||||
# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
|
||||
|
||||
# This example catches duplicate Signed-off-by lines.
|
||||
|
||||
test "" = "$(grep '^Signed-off-by: ' "$1" |
|
||||
sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || {
|
||||
echo >&2 Duplicate Signed-off-by lines.
|
||||
exit 1
|
||||
}
|
||||
|
|
@ -0,0 +1,174 @@
|
|||
#!/usr/bin/perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use IPC::Open2;
|
||||
|
||||
# An example hook script to integrate Watchman
|
||||
# (https://facebook.github.io/watchman/) with git to speed up detecting
|
||||
# new and modified files.
|
||||
#
|
||||
# The hook is passed a version (currently 2) and last update token
|
||||
# formatted as a string and outputs to stdout a new update token and
|
||||
# all files that have been modified since the update token. Paths must
|
||||
# be relative to the root of the working tree and separated by a single NUL.
|
||||
#
|
||||
# To enable this hook, rename this file to "query-watchman" and set
|
||||
# 'git config core.fsmonitor .git/hooks/query-watchman'
|
||||
#
|
||||
my ($version, $last_update_token) = @ARGV;
|
||||
|
||||
# Uncomment for debugging
|
||||
# print STDERR "$0 $version $last_update_token\n";
|
||||
|
||||
# Check the hook interface version
|
||||
if ($version ne 2) {
|
||||
die "Unsupported query-fsmonitor hook version '$version'.\n" .
|
||||
"Falling back to scanning...\n";
|
||||
}
|
||||
|
||||
my $git_work_tree = get_working_dir();
|
||||
|
||||
my $retry = 1;
|
||||
|
||||
my $json_pkg;
|
||||
eval {
|
||||
require JSON::XS;
|
||||
$json_pkg = "JSON::XS";
|
||||
1;
|
||||
} or do {
|
||||
require JSON::PP;
|
||||
$json_pkg = "JSON::PP";
|
||||
};
|
||||
|
||||
launch_watchman();
|
||||
|
||||
sub launch_watchman {
|
||||
my $o = watchman_query();
|
||||
if (is_work_tree_watched($o)) {
|
||||
output_result($o->{clock}, @{$o->{files}});
|
||||
}
|
||||
}
|
||||
|
||||
sub output_result {
|
||||
my ($clockid, @files) = @_;
|
||||
|
||||
# Uncomment for debugging watchman output
|
||||
# open (my $fh, ">", ".git/watchman-output.out");
|
||||
# binmode $fh, ":utf8";
|
||||
# print $fh "$clockid\n@files\n";
|
||||
# close $fh;
|
||||
|
||||
binmode STDOUT, ":utf8";
|
||||
print $clockid;
|
||||
print "\0";
|
||||
local $, = "\0";
|
||||
print @files;
|
||||
}
|
||||
|
||||
sub watchman_clock {
|
||||
my $response = qx/watchman clock "$git_work_tree"/;
|
||||
die "Failed to get clock id on '$git_work_tree'.\n" .
|
||||
"Falling back to scanning...\n" if $? != 0;
|
||||
|
||||
return $json_pkg->new->utf8->decode($response);
|
||||
}
|
||||
|
||||
sub watchman_query {
|
||||
my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty')
|
||||
or die "open2() failed: $!\n" .
|
||||
"Falling back to scanning...\n";
|
||||
|
||||
# In the query expression below we're asking for names of files that
|
||||
# changed since $last_update_token but not from the .git folder.
|
||||
#
|
||||
# To accomplish this, we're using the "since" generator to use the
|
||||
# recency index to select candidate nodes and "fields" to limit the
|
||||
# output to file names only. Then we're using the "expression" term to
|
||||
# further constrain the results.
|
||||
my $last_update_line = "";
|
||||
if (substr($last_update_token, 0, 1) eq "c") {
|
||||
$last_update_token = "\"$last_update_token\"";
|
||||
$last_update_line = qq[\n"since": $last_update_token,];
|
||||
}
|
||||
my $query = <<" END";
|
||||
["query", "$git_work_tree", {$last_update_line
|
||||
"fields": ["name"],
|
||||
"expression": ["not", ["dirname", ".git"]]
|
||||
}]
|
||||
END
|
||||
|
||||
# Uncomment for debugging the watchman query
|
||||
# open (my $fh, ">", ".git/watchman-query.json");
|
||||
# print $fh $query;
|
||||
# close $fh;
|
||||
|
||||
print CHLD_IN $query;
|
||||
close CHLD_IN;
|
||||
my $response = do {local $/; <CHLD_OUT>};
|
||||
|
||||
# Uncomment for debugging the watch response
|
||||
# open ($fh, ">", ".git/watchman-response.json");
|
||||
# print $fh $response;
|
||||
# close $fh;
|
||||
|
||||
die "Watchman: command returned no output.\n" .
|
||||
"Falling back to scanning...\n" if $response eq "";
|
||||
die "Watchman: command returned invalid output: $response\n" .
|
||||
"Falling back to scanning...\n" unless $response =~ /^\{/;
|
||||
|
||||
return $json_pkg->new->utf8->decode($response);
|
||||
}
|
||||
|
||||
sub is_work_tree_watched {
|
||||
my ($output) = @_;
|
||||
my $error = $output->{error};
|
||||
if ($retry > 0 and $error and $error =~ m/unable to resolve root .* directory (.*) is not watched/) {
|
||||
$retry--;
|
||||
my $response = qx/watchman watch "$git_work_tree"/;
|
||||
die "Failed to make watchman watch '$git_work_tree'.\n" .
|
||||
"Falling back to scanning...\n" if $? != 0;
|
||||
$output = $json_pkg->new->utf8->decode($response);
|
||||
$error = $output->{error};
|
||||
die "Watchman: $error.\n" .
|
||||
"Falling back to scanning...\n" if $error;
|
||||
|
||||
# Uncomment for debugging watchman output
|
||||
# open (my $fh, ">", ".git/watchman-output.out");
|
||||
# close $fh;
|
||||
|
||||
# Watchman will always return all files on the first query so
|
||||
# return the fast "everything is dirty" flag to git and do the
|
||||
# Watchman query just to get it over with now so we won't pay
|
||||
# the cost in git to look up each individual file.
|
||||
my $o = watchman_clock();
|
||||
$error = $output->{error};
|
||||
|
||||
die "Watchman: $error.\n" .
|
||||
"Falling back to scanning...\n" if $error;
|
||||
|
||||
output_result($o->{clock}, ("/"));
|
||||
$last_update_token = $o->{clock};
|
||||
|
||||
eval { launch_watchman() };
|
||||
return 0;
|
||||
}
|
||||
|
||||
die "Watchman: $error.\n" .
|
||||
"Falling back to scanning...\n" if $error;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sub get_working_dir {
|
||||
my $working_dir;
|
||||
if ($^O =~ 'msys' || $^O =~ 'cygwin') {
|
||||
$working_dir = Win32::GetCwd();
|
||||
$working_dir =~ tr/\\/\//;
|
||||
} else {
|
||||
require Cwd;
|
||||
$working_dir = Cwd::cwd();
|
||||
}
|
||||
|
||||
return $working_dir;
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to prepare a packed repository for use over
|
||||
# dumb transports.
|
||||
#
|
||||
# To enable this hook, rename this file to "post-update".
|
||||
|
||||
exec git update-server-info
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to verify what is about to be committed
|
||||
# by applypatch from an e-mail message.
|
||||
#
|
||||
# The hook should exit with non-zero status after issuing an
|
||||
# appropriate message if it wants to stop the commit.
|
||||
#
|
||||
# To enable this hook, rename this file to "pre-applypatch".
|
||||
|
||||
. git-sh-setup
|
||||
precommit="$(git rev-parse --git-path hooks/pre-commit)"
|
||||
test -x "$precommit" && exec "$precommit" ${1+"$@"}
|
||||
:
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to verify what is about to be committed.
|
||||
# Called by "git commit" with no arguments. The hook should
|
||||
# exit with non-zero status after issuing an appropriate message if
|
||||
# it wants to stop the commit.
|
||||
#
|
||||
# To enable this hook, rename this file to "pre-commit".
|
||||
|
||||
if git rev-parse --verify HEAD >/dev/null 2>&1
|
||||
then
|
||||
against=HEAD
|
||||
else
|
||||
# Initial commit: diff against an empty tree object
|
||||
against=$(git hash-object -t tree /dev/null)
|
||||
fi
|
||||
|
||||
# If you want to allow non-ASCII filenames set this variable to true.
|
||||
allownonascii=$(git config --type=bool hooks.allownonascii)
|
||||
|
||||
# Redirect output to stderr.
|
||||
exec 1>&2
|
||||
|
||||
# Cross platform projects tend to avoid non-ASCII filenames; prevent
|
||||
# them from being added to the repository. We exploit the fact that the
|
||||
# printable range starts at the space character and ends with tilde.
|
||||
if [ "$allownonascii" != "true" ] &&
|
||||
# Note that the use of brackets around a tr range is ok here, (it's
|
||||
# even required, for portability to Solaris 10's /usr/bin/tr), since
|
||||
# the square bracket bytes happen to fall in the designated range.
|
||||
test $(git diff-index --cached --name-only --diff-filter=A -z $against |
|
||||
LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
|
||||
then
|
||||
cat <<\EOF
|
||||
Error: Attempt to add a non-ASCII file name.
|
||||
|
||||
This can cause problems if you want to work with people on other platforms.
|
||||
|
||||
To be portable it is advisable to rename the file.
|
||||
|
||||
If you know what you are doing you can disable this check using:
|
||||
|
||||
git config hooks.allownonascii true
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If there are whitespace errors, print the offending file names and fail.
|
||||
exec git diff-index --check --cached $against --
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to verify what is about to be committed.
|
||||
# Called by "git merge" with no arguments. The hook should
|
||||
# exit with non-zero status after issuing an appropriate message to
|
||||
# stderr if it wants to stop the merge commit.
|
||||
#
|
||||
# To enable this hook, rename this file to "pre-merge-commit".
|
||||
|
||||
. git-sh-setup
|
||||
test -x "$GIT_DIR/hooks/pre-commit" &&
|
||||
exec "$GIT_DIR/hooks/pre-commit"
|
||||
:
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
#!/bin/sh
|
||||
|
||||
# An example hook script to verify what is about to be pushed. Called by "git
|
||||
# push" after it has checked the remote status, but before anything has been
|
||||
# pushed. If this script exits with a non-zero status nothing will be pushed.
|
||||
#
|
||||
# This hook is called with the following parameters:
|
||||
#
|
||||
# $1 -- Name of the remote to which the push is being done
|
||||
# $2 -- URL to which the push is being done
|
||||
#
|
||||
# If pushing without using a named remote those arguments will be equal.
|
||||
#
|
||||
# Information about the commits which are being pushed is supplied as lines to
|
||||
# the standard input in the form:
|
||||
#
|
||||
# <local ref> <local oid> <remote ref> <remote oid>
|
||||
#
|
||||
# This sample shows how to prevent push of commits where the log message starts
|
||||
# with "WIP" (work in progress).
|
||||
|
||||
remote="$1"
|
||||
url="$2"
|
||||
|
||||
zero=$(git hash-object --stdin </dev/null | tr '[0-9a-f]' '0')
|
||||
|
||||
while read local_ref local_oid remote_ref remote_oid
|
||||
do
|
||||
if test "$local_oid" = "$zero"
|
||||
then
|
||||
# Handle delete
|
||||
:
|
||||
else
|
||||
if test "$remote_oid" = "$zero"
|
||||
then
|
||||
# New branch, examine all commits
|
||||
range="$local_oid"
|
||||
else
|
||||
# Update to existing branch, examine new commits
|
||||
range="$remote_oid..$local_oid"
|
||||
fi
|
||||
|
||||
# Check for WIP commit
|
||||
commit=$(git rev-list -n 1 --grep '^WIP' "$range")
|
||||
if test -n "$commit"
|
||||
then
|
||||
echo >&2 "Found WIP commit in $local_ref, not pushing"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,169 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Copyright (c) 2006, 2008 Junio C Hamano
|
||||
#
|
||||
# The "pre-rebase" hook is run just before "git rebase" starts doing
|
||||
# its job, and can prevent the command from running by exiting with
|
||||
# non-zero status.
|
||||
#
|
||||
# The hook is called with the following parameters:
|
||||
#
|
||||
# $1 -- the upstream the series was forked from.
|
||||
# $2 -- the branch being rebased (or empty when rebasing the current branch).
|
||||
#
|
||||
# This sample shows how to prevent topic branches that are already
|
||||
# merged to 'next' branch from getting rebased, because allowing it
|
||||
# would result in rebasing already published history.
|
||||
|
||||
publish=next
|
||||
basebranch="$1"
|
||||
if test "$#" = 2
|
||||
then
|
||||
topic="refs/heads/$2"
|
||||
else
|
||||
topic=`git symbolic-ref HEAD` ||
|
||||
exit 0 ;# we do not interrupt rebasing detached HEAD
|
||||
fi
|
||||
|
||||
case "$topic" in
|
||||
refs/heads/??/*)
|
||||
;;
|
||||
*)
|
||||
exit 0 ;# we do not interrupt others.
|
||||
;;
|
||||
esac
|
||||
|
||||
# Now we are dealing with a topic branch being rebased
|
||||
# on top of master. Is it OK to rebase it?
|
||||
|
||||
# Does the topic really exist?
|
||||
git show-ref -q "$topic" || {
|
||||
echo >&2 "No such branch $topic"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Is topic fully merged to master?
|
||||
not_in_master=`git rev-list --pretty=oneline ^master "$topic"`
|
||||
if test -z "$not_in_master"
|
||||
then
|
||||
echo >&2 "$topic is fully merged to master; better remove it."
|
||||
exit 1 ;# we could allow it, but there is no point.
|
||||
fi
|
||||
|
||||
# Is topic ever merged to next? If so you should not be rebasing it.
|
||||
only_next_1=`git rev-list ^master "^$topic" ${publish} | sort`
|
||||
only_next_2=`git rev-list ^master ${publish} | sort`
|
||||
if test "$only_next_1" = "$only_next_2"
|
||||
then
|
||||
not_in_topic=`git rev-list "^$topic" master`
|
||||
if test -z "$not_in_topic"
|
||||
then
|
||||
echo >&2 "$topic is already up to date with master"
|
||||
exit 1 ;# we could allow it, but there is no point.
|
||||
else
|
||||
exit 0
|
||||
fi
|
||||
else
|
||||
not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"`
|
||||
/usr/bin/perl -e '
|
||||
my $topic = $ARGV[0];
|
||||
my $msg = "* $topic has commits already merged to public branch:\n";
|
||||
my (%not_in_next) = map {
|
||||
/^([0-9a-f]+) /;
|
||||
($1 => 1);
|
||||
} split(/\n/, $ARGV[1]);
|
||||
for my $elem (map {
|
||||
/^([0-9a-f]+) (.*)$/;
|
||||
[$1 => $2];
|
||||
} split(/\n/, $ARGV[2])) {
|
||||
if (!exists $not_in_next{$elem->[0]}) {
|
||||
if ($msg) {
|
||||
print STDERR $msg;
|
||||
undef $msg;
|
||||
}
|
||||
print STDERR " $elem->[1]\n";
|
||||
}
|
||||
}
|
||||
' "$topic" "$not_in_next" "$not_in_master"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
<<\DOC_END
|
||||
|
||||
This sample hook safeguards topic branches that have been
|
||||
published from being rewound.
|
||||
|
||||
The workflow assumed here is:
|
||||
|
||||
* Once a topic branch forks from "master", "master" is never
|
||||
merged into it again (either directly or indirectly).
|
||||
|
||||
* Once a topic branch is fully cooked and merged into "master",
|
||||
it is deleted. If you need to build on top of it to correct
|
||||
earlier mistakes, a new topic branch is created by forking at
|
||||
the tip of the "master". This is not strictly necessary, but
|
||||
it makes it easier to keep your history simple.
|
||||
|
||||
* Whenever you need to test or publish your changes to topic
|
||||
branches, merge them into "next" branch.
|
||||
|
||||
The script, being an example, hardcodes the publish branch name
|
||||
to be "next", but it is trivial to make it configurable via
|
||||
$GIT_DIR/config mechanism.
|
||||
|
||||
With this workflow, you would want to know:
|
||||
|
||||
(1) ... if a topic branch has ever been merged to "next". Young
|
||||
topic branches can have stupid mistakes you would rather
|
||||
clean up before publishing, and things that have not been
|
||||
merged into other branches can be easily rebased without
|
||||
affecting other people. But once it is published, you would
|
||||
not want to rewind it.
|
||||
|
||||
(2) ... if a topic branch has been fully merged to "master".
|
||||
Then you can delete it. More importantly, you should not
|
||||
build on top of it -- other people may already want to
|
||||
change things related to the topic as patches against your
|
||||
"master", so if you need further changes, it is better to
|
||||
fork the topic (perhaps with the same name) afresh from the
|
||||
tip of "master".
|
||||
|
||||
Let's look at this example:
|
||||
|
||||
o---o---o---o---o---o---o---o---o---o "next"
|
||||
/ / / /
|
||||
/ a---a---b A / /
|
||||
/ / / /
|
||||
/ / c---c---c---c B /
|
||||
/ / / \ /
|
||||
/ / / b---b C \ /
|
||||
/ / / / \ /
|
||||
---o---o---o---o---o---o---o---o---o---o---o "master"
|
||||
|
||||
|
||||
A, B and C are topic branches.
|
||||
|
||||
* A has one fix since it was merged up to "next".
|
||||
|
||||
* B has finished. It has been fully merged up to "master" and "next",
|
||||
and is ready to be deleted.
|
||||
|
||||
* C has not merged to "next" at all.
|
||||
|
||||
We would want to allow C to be rebased, refuse A, and encourage
|
||||
B to be deleted.
|
||||
|
||||
To compute (1):
|
||||
|
||||
git rev-list ^master ^topic next
|
||||
git rev-list ^master next
|
||||
|
||||
if these match, topic has not merged in next at all.
|
||||
|
||||
To compute (2):
|
||||
|
||||
git rev-list master..topic
|
||||
|
||||
if this is empty, it is fully merged to "master".
|
||||
|
||||
DOC_END
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to make use of push options.
|
||||
# The example simply echoes all push options that start with 'echoback='
|
||||
# and rejects all pushes when the "reject" push option is used.
|
||||
#
|
||||
# To enable this hook, rename this file to "pre-receive".
|
||||
|
||||
if test -n "$GIT_PUSH_OPTION_COUNT"
|
||||
then
|
||||
i=0
|
||||
while test "$i" -lt "$GIT_PUSH_OPTION_COUNT"
|
||||
do
|
||||
eval "value=\$GIT_PUSH_OPTION_$i"
|
||||
case "$value" in
|
||||
echoback=*)
|
||||
echo "echo from the pre-receive-hook: ${value#*=}" >&2
|
||||
;;
|
||||
reject)
|
||||
exit 1
|
||||
esac
|
||||
i=$((i + 1))
|
||||
done
|
||||
fi
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to prepare the commit log message.
|
||||
# Called by "git commit" with the name of the file that has the
|
||||
# commit message, followed by the description of the commit
|
||||
# message's source. The hook's purpose is to edit the commit
|
||||
# message file. If the hook fails with a non-zero status,
|
||||
# the commit is aborted.
|
||||
#
|
||||
# To enable this hook, rename this file to "prepare-commit-msg".
|
||||
|
||||
# This hook includes three examples. The first one removes the
|
||||
# "# Please enter the commit message..." help message.
|
||||
#
|
||||
# The second includes the output of "git diff --name-status -r"
|
||||
# into the message, just before the "git status" output. It is
|
||||
# commented because it doesn't cope with --amend or with squashed
|
||||
# commits.
|
||||
#
|
||||
# The third example adds a Signed-off-by line to the message, that can
|
||||
# still be edited. This is rarely a good idea.
|
||||
|
||||
COMMIT_MSG_FILE=$1
|
||||
COMMIT_SOURCE=$2
|
||||
SHA1=$3
|
||||
|
||||
/usr/bin/perl -i.bak -ne 'print unless(m/^. Please enter the commit message/..m/^#$/)' "$COMMIT_MSG_FILE"
|
||||
|
||||
# case "$COMMIT_SOURCE,$SHA1" in
|
||||
# ,|template,)
|
||||
# /usr/bin/perl -i.bak -pe '
|
||||
# print "\n" . `git diff --cached --name-status -r`
|
||||
# if /^#/ && $first++ == 0' "$COMMIT_MSG_FILE" ;;
|
||||
# *) ;;
|
||||
# esac
|
||||
|
||||
# SOB=$(git var GIT_COMMITTER_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
|
||||
# git interpret-trailers --in-place --trailer "$SOB" "$COMMIT_MSG_FILE"
|
||||
# if test -z "$COMMIT_SOURCE"
|
||||
# then
|
||||
# /usr/bin/perl -i.bak -pe 'print "\n" if !$first_line++' "$COMMIT_MSG_FILE"
|
||||
# fi
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
#!/bin/sh
|
||||
|
||||
# An example hook script to update a checked-out tree on a git push.
|
||||
#
|
||||
# This hook is invoked by git-receive-pack(1) when it reacts to git
|
||||
# push and updates reference(s) in its repository, and when the push
|
||||
# tries to update the branch that is currently checked out and the
|
||||
# receive.denyCurrentBranch configuration variable is set to
|
||||
# updateInstead.
|
||||
#
|
||||
# By default, such a push is refused if the working tree and the index
|
||||
# of the remote repository has any difference from the currently
|
||||
# checked out commit; when both the working tree and the index match
|
||||
# the current commit, they are updated to match the newly pushed tip
|
||||
# of the branch. This hook is to be used to override the default
|
||||
# behaviour; however the code below reimplements the default behaviour
|
||||
# as a starting point for convenient modification.
|
||||
#
|
||||
# The hook receives the commit with which the tip of the current
|
||||
# branch is going to be updated:
|
||||
commit=$1
|
||||
|
||||
# It can exit with a non-zero status to refuse the push (when it does
|
||||
# so, it must not modify the index or the working tree).
|
||||
die () {
|
||||
echo >&2 "$*"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Or it can make any necessary changes to the working tree and to the
|
||||
# index to bring them to the desired state when the tip of the current
|
||||
# branch is updated to the new commit, and exit with a zero status.
|
||||
#
|
||||
# For example, the hook can simply run git read-tree -u -m HEAD "$1"
|
||||
# in order to emulate git fetch that is run in the reverse direction
|
||||
# with git push, as the two-tree form of git read-tree -u -m is
|
||||
# essentially the same as git switch or git checkout that switches
|
||||
# branches while keeping the local changes in the working tree that do
|
||||
# not interfere with the difference between the branches.
|
||||
|
||||
# The below is a more-or-less exact translation to shell of the C code
|
||||
# for the default behaviour for git's push-to-checkout hook defined in
|
||||
# the push_to_deploy() function in builtin/receive-pack.c.
|
||||
#
|
||||
# Note that the hook will be executed from the repository directory,
|
||||
# not from the working tree, so if you want to perform operations on
|
||||
# the working tree, you will have to adapt your code accordingly, e.g.
|
||||
# by adding "cd .." or using relative paths.
|
||||
|
||||
if ! git update-index -q --ignore-submodules --refresh
|
||||
then
|
||||
die "Up-to-date check failed"
|
||||
fi
|
||||
|
||||
if ! git diff-files --quiet --ignore-submodules --
|
||||
then
|
||||
die "Working directory has unstaged changes"
|
||||
fi
|
||||
|
||||
# This is a rough translation of:
|
||||
#
|
||||
# head_has_history() ? "HEAD" : EMPTY_TREE_SHA1_HEX
|
||||
if git cat-file -e HEAD 2>/dev/null
|
||||
then
|
||||
head=HEAD
|
||||
else
|
||||
head=$(git hash-object -t tree --stdin </dev/null)
|
||||
fi
|
||||
|
||||
if ! git diff-index --quiet --cached --ignore-submodules $head --
|
||||
then
|
||||
die "Working directory has staged changes"
|
||||
fi
|
||||
|
||||
if ! git read-tree -u -m "$commit"
|
||||
then
|
||||
die "Could not update working tree to new HEAD"
|
||||
fi
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
#!/bin/sh
|
||||
|
||||
# An example hook script to validate a patch (and/or patch series) before
|
||||
# sending it via email.
|
||||
#
|
||||
# The hook should exit with non-zero status after issuing an appropriate
|
||||
# message if it wants to prevent the email(s) from being sent.
|
||||
#
|
||||
# To enable this hook, rename this file to "sendemail-validate".
|
||||
#
|
||||
# By default, it will only check that the patch(es) can be applied on top of
|
||||
# the default upstream branch without conflicts in a secondary worktree. After
|
||||
# validation (successful or not) of the last patch of a series, the worktree
|
||||
# will be deleted.
|
||||
#
|
||||
# The following config variables can be set to change the default remote and
|
||||
# remote ref that are used to apply the patches against:
|
||||
#
|
||||
# sendemail.validateRemote (default: origin)
|
||||
# sendemail.validateRemoteRef (default: HEAD)
|
||||
#
|
||||
# Replace the TODO placeholders with appropriate checks according to your
|
||||
# needs.
|
||||
|
||||
validate_cover_letter () {
|
||||
file="$1"
|
||||
# TODO: Replace with appropriate checks (e.g. spell checking).
|
||||
true
|
||||
}
|
||||
|
||||
validate_patch () {
|
||||
file="$1"
|
||||
# Ensure that the patch applies without conflicts.
|
||||
git am -3 "$file" || return
|
||||
# TODO: Replace with appropriate checks for this patch
|
||||
# (e.g. checkpatch.pl).
|
||||
true
|
||||
}
|
||||
|
||||
validate_series () {
|
||||
# TODO: Replace with appropriate checks for the whole series
|
||||
# (e.g. quick build, coding style checks, etc.).
|
||||
true
|
||||
}
|
||||
|
||||
# main -------------------------------------------------------------------------
|
||||
|
||||
if test "$GIT_SENDEMAIL_FILE_COUNTER" = 1
|
||||
then
|
||||
remote=$(git config --default origin --get sendemail.validateRemote) &&
|
||||
ref=$(git config --default HEAD --get sendemail.validateRemoteRef) &&
|
||||
worktree=$(mktemp --tmpdir -d sendemail-validate.XXXXXXX) &&
|
||||
git worktree add -fd --checkout "$worktree" "refs/remotes/$remote/$ref" &&
|
||||
git config --replace-all sendemail.validateWorktree "$worktree"
|
||||
else
|
||||
worktree=$(git config --get sendemail.validateWorktree)
|
||||
fi || {
|
||||
echo "sendemail-validate: error: failed to prepare worktree" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
unset GIT_DIR GIT_WORK_TREE
|
||||
cd "$worktree" &&
|
||||
|
||||
if grep -q "^diff --git " "$1"
|
||||
then
|
||||
validate_patch "$1"
|
||||
else
|
||||
validate_cover_letter "$1"
|
||||
fi &&
|
||||
|
||||
if test "$GIT_SENDEMAIL_FILE_COUNTER" = "$GIT_SENDEMAIL_FILE_TOTAL"
|
||||
then
|
||||
git config --unset-all sendemail.validateWorktree &&
|
||||
trap 'git worktree remove -ff "$worktree"' EXIT &&
|
||||
validate_series
|
||||
fi
|
||||
|
|
@ -0,0 +1,128 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# An example hook script to block unannotated tags from entering.
|
||||
# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
|
||||
#
|
||||
# To enable this hook, rename this file to "update".
|
||||
#
|
||||
# Config
|
||||
# ------
|
||||
# hooks.allowunannotated
|
||||
# This boolean sets whether unannotated tags will be allowed into the
|
||||
# repository. By default they won't be.
|
||||
# hooks.allowdeletetag
|
||||
# This boolean sets whether deleting tags will be allowed in the
|
||||
# repository. By default they won't be.
|
||||
# hooks.allowmodifytag
|
||||
# This boolean sets whether a tag may be modified after creation. By default
|
||||
# it won't be.
|
||||
# hooks.allowdeletebranch
|
||||
# This boolean sets whether deleting branches will be allowed in the
|
||||
# repository. By default they won't be.
|
||||
# hooks.denycreatebranch
|
||||
# This boolean sets whether remotely creating branches will be denied
|
||||
# in the repository. By default this is allowed.
|
||||
#
|
||||
|
||||
# --- Command line
|
||||
refname="$1"
|
||||
oldrev="$2"
|
||||
newrev="$3"
|
||||
|
||||
# --- Safety check
|
||||
if [ -z "$GIT_DIR" ]; then
|
||||
echo "Don't run this script from the command line." >&2
|
||||
echo " (if you want, you could supply GIT_DIR then run" >&2
|
||||
echo " $0 <ref> <oldrev> <newrev>)" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
|
||||
echo "usage: $0 <ref> <oldrev> <newrev>" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# --- Config
|
||||
allowunannotated=$(git config --type=bool hooks.allowunannotated)
|
||||
allowdeletebranch=$(git config --type=bool hooks.allowdeletebranch)
|
||||
denycreatebranch=$(git config --type=bool hooks.denycreatebranch)
|
||||
allowdeletetag=$(git config --type=bool hooks.allowdeletetag)
|
||||
allowmodifytag=$(git config --type=bool hooks.allowmodifytag)
|
||||
|
||||
# check for no description
|
||||
projectdesc=$(sed -e '1q' "$GIT_DIR/description")
|
||||
case "$projectdesc" in
|
||||
"Unnamed repository"* | "")
|
||||
echo "*** Project description file hasn't been set" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# --- Check types
|
||||
# if $newrev is 0000...0000, it's a commit to delete a ref.
|
||||
zero=$(git hash-object --stdin </dev/null | tr '[0-9a-f]' '0')
|
||||
if [ "$newrev" = "$zero" ]; then
|
||||
newrev_type=delete
|
||||
else
|
||||
newrev_type=$(git cat-file -t $newrev)
|
||||
fi
|
||||
|
||||
case "$refname","$newrev_type" in
|
||||
refs/tags/*,commit)
|
||||
# un-annotated tag
|
||||
short_refname=${refname##refs/tags/}
|
||||
if [ "$allowunannotated" != "true" ]; then
|
||||
echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
|
||||
echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
refs/tags/*,delete)
|
||||
# delete tag
|
||||
if [ "$allowdeletetag" != "true" ]; then
|
||||
echo "*** Deleting a tag is not allowed in this repository" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
refs/tags/*,tag)
|
||||
# annotated tag
|
||||
if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1
|
||||
then
|
||||
echo "*** Tag '$refname' already exists." >&2
|
||||
echo "*** Modifying a tag is not allowed in this repository." >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
refs/heads/*,commit)
|
||||
# branch
|
||||
if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then
|
||||
echo "*** Creating a branch is not allowed in this repository" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
refs/heads/*,delete)
|
||||
# delete branch
|
||||
if [ "$allowdeletebranch" != "true" ]; then
|
||||
echo "*** Deleting a branch is not allowed in this repository" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
refs/remotes/*,commit)
|
||||
# tracking branch
|
||||
;;
|
||||
refs/remotes/*,delete)
|
||||
# delete tracking branch
|
||||
if [ "$allowdeletebranch" != "true" ]; then
|
||||
echo "*** Deleting a tracking branch is not allowed in this repository" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# Anything else (is there anything else?)
|
||||
echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# --- Finished
|
||||
exit 0
|
||||
BIN
dot_claude/skills/claudeception/dot_git/index
Normal file
BIN
dot_claude/skills/claudeception/dot_git/index
Normal file
Binary file not shown.
6
dot_claude/skills/claudeception/dot_git/info/exclude
Normal file
6
dot_claude/skills/claudeception/dot_git/info/exclude
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
# git ls-files --others --exclude-from=.git/info/exclude
|
||||
# Lines that start with '#' are comments.
|
||||
# For a project mostly in C, the following would be a good set of
|
||||
# exclude patterns (uncomment them if you want to use them):
|
||||
# *.[oa]
|
||||
# *~
|
||||
1
dot_claude/skills/claudeception/dot_git/logs/HEAD
Normal file
1
dot_claude/skills/claudeception/dot_git/logs/HEAD
Normal file
|
|
@ -0,0 +1 @@
|
|||
0000000000000000000000000000000000000000 7d7f5915f90db26e1a3fc52db6ac2e68d6d705a2 Viktor Barzin <viktorbarzin@meta.com> 1769817192 +0000 clone: from https://github.com/blader/Claudeception.git
|
||||
|
|
@ -0,0 +1 @@
|
|||
0000000000000000000000000000000000000000 7d7f5915f90db26e1a3fc52db6ac2e68d6d705a2 Viktor Barzin <viktorbarzin@meta.com> 1769817192 +0000 clone: from https://github.com/blader/Claudeception.git
|
||||
|
|
@ -0,0 +1 @@
|
|||
0000000000000000000000000000000000000000 7d7f5915f90db26e1a3fc52db6ac2e68d6d705a2 Viktor Barzin <viktorbarzin@meta.com> 1769817192 +0000 clone: from https://github.com/blader/Claudeception.git
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
2
dot_claude/skills/claudeception/dot_git/packed-refs
Normal file
2
dot_claude/skills/claudeception/dot_git/packed-refs
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
# pack-refs with: peeled fully-peeled sorted
|
||||
7d7f5915f90db26e1a3fc52db6ac2e68d6d705a2 refs/remotes/origin/main
|
||||
1
dot_claude/skills/claudeception/dot_git/refs/heads/main
Normal file
1
dot_claude/skills/claudeception/dot_git/refs/heads/main
Normal file
|
|
@ -0,0 +1 @@
|
|||
7d7f5915f90db26e1a3fc52db6ac2e68d6d705a2
|
||||
|
|
@ -0,0 +1 @@
|
|||
ref: refs/remotes/origin/main
|
||||
0
dot_claude/skills/claudeception/dot_git/refs/tags/.keep
Normal file
0
dot_claude/skills/claudeception/dot_git/refs/tags/.keep
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
---
|
||||
name: nextjs-server-side-error-debugging
|
||||
description: |
|
||||
Debug getServerSideProps and getStaticProps errors in Next.js. Use when:
|
||||
(1) Page shows generic error but browser console is empty, (2) API routes
|
||||
return 500 with no details, (3) Server-side code fails silently, (4) Error
|
||||
only occurs on refresh not client navigation. Check terminal/server logs
|
||||
instead of browser for actual error messages.
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: 2024-01-15
|
||||
---
|
||||
|
||||
# Next.js Server-Side Error Debugging
|
||||
|
||||
## Problem
|
||||
|
||||
Server-side errors in Next.js don't appear in the browser console, making debugging
|
||||
frustrating when you're looking in the wrong place. The browser shows a generic error
|
||||
page or 500 status, but no stack trace or useful error information appears in DevTools.
|
||||
|
||||
## Context / Trigger Conditions
|
||||
|
||||
This skill applies when:
|
||||
|
||||
- Page displays "Internal Server Error" or custom error page
|
||||
- Browser console shows no errors, or only a generic fetch failure
|
||||
- You're using `getServerSideProps`, `getStaticProps`, or API routes
|
||||
- Error only occurs on page refresh or direct navigation (not client-side transitions)
|
||||
- The error is intermittent and hard to reproduce in the browser
|
||||
|
||||
Common misleading symptoms:
|
||||
- "Unhandled Runtime Error" modal that doesn't show the real cause
|
||||
- Network tab shows 500 but response body is empty or generic
|
||||
- Error disappears when you add console.log (timing issue)
|
||||
|
||||
## Solution
|
||||
|
||||
### Step 1: Check the Terminal
|
||||
|
||||
The actual error with full stack trace appears in the terminal where `npm run dev`
|
||||
or `next dev` is running. This is the **first place to look**.
|
||||
|
||||
```bash
|
||||
# If you don't see the terminal, find the process
|
||||
ps aux | grep next
|
||||
# Or restart with visible output
|
||||
npm run dev
|
||||
```
|
||||
|
||||
### Step 2: Add Explicit Error Handling
|
||||
|
||||
For persistent debugging, wrap server-side code with try-catch:
|
||||
|
||||
```typescript
|
||||
export async function getServerSideProps(context) {
|
||||
try {
|
||||
const data = await fetchSomething();
|
||||
return { props: { data } };
|
||||
} catch (error) {
|
||||
console.error('getServerSideProps error:', error);
|
||||
// Return error state instead of throwing
|
||||
return { props: { error: error.message } };
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: For Production Errors
|
||||
|
||||
Check your hosting provider's logs:
|
||||
- **Vercel**: Dashboard → Project → Logs (Functions tab)
|
||||
- **AWS**: CloudWatch Logs
|
||||
- **Netlify**: Functions tab in dashboard
|
||||
- **Self-hosted**: Check your Node.js process logs
|
||||
|
||||
### Step 4: Common Causes
|
||||
|
||||
1. **Environment variables**: Missing in production but present locally
|
||||
2. **Database connections**: Connection string issues, cold starts
|
||||
3. **Import errors**: Server-only code accidentally imported on client
|
||||
4. **Async/await**: Missing await on async operations
|
||||
5. **JSON serialization**: Objects that can't be serialized (dates, functions)
|
||||
|
||||
## Verification
|
||||
|
||||
After checking the terminal, you should see:
|
||||
- Full stack trace with file name and line number
|
||||
- The actual error message (not generic 500)
|
||||
- Variable values if you added console.log statements
|
||||
|
||||
## Example
|
||||
|
||||
**Symptom**: User reports page shows "Internal Server Error" after clicking a link.
|
||||
|
||||
**Investigation**:
|
||||
1. Open browser DevTools → Console: Empty
|
||||
2. Network tab shows: `GET /dashboard → 500`
|
||||
3. Check terminal running `npm run dev`:
|
||||
|
||||
```
|
||||
Error: Cannot read property 'id' of undefined
|
||||
at getServerSideProps (/app/pages/dashboard.tsx:15:25)
|
||||
at renderToHTML (/app/node_modules/next/dist/server/render.js:428:22)
|
||||
```
|
||||
|
||||
**Cause found**: Database query returned `null` instead of user object.
|
||||
|
||||
## Notes
|
||||
|
||||
- In development, Next.js sometimes shows an error overlay, but it often has less
|
||||
detail than the terminal output
|
||||
- `reactStrictMode: true` in `next.config.js` causes double-execution of server
|
||||
functions in development, which can make debugging confusing
|
||||
- For API routes, the error appears in the same terminal as page errors
|
||||
- Client-side errors (in useEffect, event handlers) DO appear in browser console—
|
||||
this skill only applies to server-side code
|
||||
- If using `next start` (production mode locally), errors may be less verbose;
|
||||
check `NODE_ENV` and consider adding custom error logging
|
||||
|
|
@ -0,0 +1,161 @@
|
|||
---
|
||||
name: prisma-connection-pool-exhaustion
|
||||
description: |
|
||||
Fix Prisma "Too many connections" and connection pool exhaustion errors in
|
||||
serverless environments (Vercel, AWS Lambda, Netlify). Use when: (1) Error
|
||||
"P2024: Timed out fetching a new connection from the pool", (2) PostgreSQL
|
||||
"too many connections for role", (3) Database works locally but fails in
|
||||
production serverless, (4) Intermittent database timeouts under load.
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: 2024-02-20
|
||||
---
|
||||
|
||||
# Prisma Connection Pool Exhaustion in Serverless
|
||||
|
||||
## Problem
|
||||
|
||||
Serverless functions create a new Prisma client instance on each cold start. Each
|
||||
instance opens multiple database connections (default: 5 per instance). With many
|
||||
concurrent requests, this quickly exhausts the database's connection limit (often
|
||||
20-100 for managed databases).
|
||||
|
||||
## Context / Trigger Conditions
|
||||
|
||||
This skill applies when you see:
|
||||
|
||||
- `P2024: Timed out fetching a new connection from the connection pool`
|
||||
- PostgreSQL: `FATAL: too many connections for role "username"`
|
||||
- MySQL: `Too many connections`
|
||||
- Works fine locally with `npm run dev` but fails in production
|
||||
- Errors appear during traffic spikes, then resolve
|
||||
- Database dashboard shows connections at or near limit
|
||||
|
||||
Environment indicators:
|
||||
- Deploying to Vercel, AWS Lambda, Netlify Functions, or similar
|
||||
- Using Prisma with PostgreSQL, MySQL, or another connection-based database
|
||||
- Database is managed (PlanetScale, Supabase, Neon, RDS, etc.)
|
||||
|
||||
## Solution
|
||||
|
||||
### Step 1: Use Connection Pooling Service
|
||||
|
||||
The recommended solution is to use a connection pooler like PgBouncer or Prisma
|
||||
Accelerate, which sits between your serverless functions and the database.
|
||||
|
||||
**For Supabase:**
|
||||
```
|
||||
# .env
|
||||
# Use the pooled connection string (port 6543, not 5432)
|
||||
DATABASE_URL="postgresql://user:pass@db.xxx.supabase.co:6543/postgres?pgbouncer=true"
|
||||
```
|
||||
|
||||
**For Neon:**
|
||||
```
|
||||
# .env
|
||||
DATABASE_URL="postgresql://user:pass@ep-xxx.us-east-2.aws.neon.tech/dbname?sslmode=require"
|
||||
# Neon has built-in pooling
|
||||
```
|
||||
|
||||
**For Prisma Accelerate:**
|
||||
```bash
|
||||
npx prisma generate --accelerate
|
||||
```
|
||||
|
||||
### Step 2: Configure Prisma Connection Limits
|
||||
|
||||
In your `schema.prisma`:
|
||||
|
||||
```prisma
|
||||
datasource db {
|
||||
provider = "postgresql"
|
||||
url = env("DATABASE_URL")
|
||||
// Limit connections per Prisma instance
|
||||
relationMode = "prisma"
|
||||
}
|
||||
```
|
||||
|
||||
In your connection URL or Prisma client:
|
||||
|
||||
```typescript
|
||||
// lib/prisma.ts
|
||||
import { PrismaClient } from '@prisma/client'
|
||||
|
||||
const globalForPrisma = global as unknown as { prisma: PrismaClient }
|
||||
|
||||
export const prisma = globalForPrisma.prisma || new PrismaClient({
|
||||
datasources: {
|
||||
db: {
|
||||
url: process.env.DATABASE_URL + '?connection_limit=1'
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if (process.env.NODE_ENV !== 'production') globalForPrisma.prisma = prisma
|
||||
```
|
||||
|
||||
### Step 3: Singleton Pattern (Development)
|
||||
|
||||
Prevent hot-reload from creating new clients:
|
||||
|
||||
```typescript
|
||||
// lib/prisma.ts
|
||||
import { PrismaClient } from '@prisma/client'
|
||||
|
||||
const globalForPrisma = globalThis as unknown as {
|
||||
prisma: PrismaClient | undefined
|
||||
}
|
||||
|
||||
export const prisma = globalForPrisma.prisma ?? new PrismaClient()
|
||||
|
||||
if (process.env.NODE_ENV !== 'production') globalForPrisma.prisma = prisma
|
||||
```
|
||||
|
||||
### Step 4: URL Parameters
|
||||
|
||||
Add these to your connection string:
|
||||
|
||||
```
|
||||
?connection_limit=1&pool_timeout=20&connect_timeout=10
|
||||
```
|
||||
|
||||
- `connection_limit=1`: One connection per serverless instance
|
||||
- `pool_timeout=20`: Wait up to 20s for available connection
|
||||
- `connect_timeout=10`: Fail fast if can't connect in 10s
|
||||
|
||||
## Verification
|
||||
|
||||
After applying fixes:
|
||||
|
||||
1. Deploy to production
|
||||
2. Run a load test: `npx autocannon -c 100 -d 30 https://your-app.com/api/test`
|
||||
3. Check database dashboard—connections should stay within limits
|
||||
4. No more P2024 errors in logs
|
||||
|
||||
## Example
|
||||
|
||||
**Before** (error under load):
|
||||
```
|
||||
[ERROR] PrismaClientKnownRequestError:
|
||||
Invalid `prisma.user.findMany()` invocation:
|
||||
Timed out fetching a new connection from the connection pool.
|
||||
```
|
||||
|
||||
**After** (with connection pooling):
|
||||
```
|
||||
# Using Supabase pooler URL
|
||||
DATABASE_URL="postgresql://...@db.xxx.supabase.co:6543/postgres?pgbouncer=true&connection_limit=1"
|
||||
```
|
||||
|
||||
Database connections stable at 10-15 even under heavy load.
|
||||
|
||||
## Notes
|
||||
|
||||
- Different managed databases have different pooling solutions—check your provider's docs
|
||||
- PlanetScale (MySQL) uses a different architecture and doesn't have this issue
|
||||
- `connection_limit=1` is aggressive; start there and increase if you see latency
|
||||
- The singleton pattern only helps in development; in production serverless, each
|
||||
instance is isolated
|
||||
- If using Prisma with Next.js API routes, each route invocation may be a separate
|
||||
serverless function
|
||||
- Consider Prisma Accelerate for built-in caching + pooling: https://www.prisma.io/accelerate
|
||||
|
|
@ -0,0 +1,237 @@
|
|||
---
|
||||
name: typescript-circular-dependency
|
||||
description: |
|
||||
Detect and resolve TypeScript/JavaScript circular import dependencies. Use when:
|
||||
(1) "Cannot access 'X' before initialization" at runtime, (2) Import returns
|
||||
undefined unexpectedly, (3) "ReferenceError: Cannot access X before initialization",
|
||||
(4) Type errors that disappear when you change import order, (5) Jest/Vitest tests
|
||||
fail with undefined imports that work in browser.
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: 2024-03-10
|
||||
---
|
||||
|
||||
# TypeScript Circular Dependency Detection and Resolution
|
||||
|
||||
## Problem
|
||||
|
||||
Circular dependencies occur when module A imports from module B, which imports
|
||||
(directly or indirectly) from module A. TypeScript compiles successfully, but at
|
||||
runtime, one of the imports evaluates to `undefined` because the module hasn't
|
||||
finished initializing yet.
|
||||
|
||||
## Context / Trigger Conditions
|
||||
|
||||
Common error messages:
|
||||
|
||||
```
|
||||
ReferenceError: Cannot access 'UserService' before initialization
|
||||
```
|
||||
|
||||
```
|
||||
TypeError: Cannot read properties of undefined (reading 'create')
|
||||
```
|
||||
|
||||
```
|
||||
TypeError: (0 , _service.doSomething) is not a function
|
||||
```
|
||||
|
||||
Symptoms that suggest circular imports:
|
||||
|
||||
- Import is `undefined` even though the export exists
|
||||
- Error only appears at runtime, not during TypeScript compilation
|
||||
- Moving an import statement changes which import is undefined
|
||||
- Tests fail but the app works (or vice versa)
|
||||
- Adding `console.log` at the top of a file changes behavior
|
||||
|
||||
## Solution
|
||||
|
||||
### Step 1: Detect the Cycle
|
||||
|
||||
Use a tool to visualize dependencies:
|
||||
|
||||
```bash
|
||||
# Install madge
|
||||
npm install -g madge
|
||||
|
||||
# Find circular dependencies
|
||||
madge --circular --extensions ts,tsx src/
|
||||
|
||||
# Generate visual graph
|
||||
madge --circular --image graph.svg src/
|
||||
```
|
||||
|
||||
Or use the TypeScript compiler:
|
||||
|
||||
```bash
|
||||
# Check for cycles (requires tsconfig setting)
|
||||
npx tsc --listFiles | head -50
|
||||
```
|
||||
|
||||
### Step 2: Identify the Pattern
|
||||
|
||||
Common circular dependency patterns:
|
||||
|
||||
**Pattern A: Service-to-Service**
|
||||
```
|
||||
services/userService.ts → services/orderService.ts → services/userService.ts
|
||||
```
|
||||
|
||||
**Pattern B: Type imports**
|
||||
```
|
||||
types/user.ts → types/order.ts → types/user.ts
|
||||
```
|
||||
|
||||
**Pattern C: Index barrel files**
|
||||
```
|
||||
components/index.ts → components/Button.tsx → components/index.ts
|
||||
```
|
||||
|
||||
### Step 3: Resolution Strategies
|
||||
|
||||
**Strategy 1: Extract Shared Dependencies**
|
||||
|
||||
Before:
|
||||
```typescript
|
||||
// userService.ts
|
||||
import { OrderService } from './orderService';
|
||||
export class UserService { ... }
|
||||
|
||||
// orderService.ts
|
||||
import { UserService } from './userService';
|
||||
export class OrderService { ... }
|
||||
```
|
||||
|
||||
After:
|
||||
```typescript
|
||||
// types/interfaces.ts (new file - no imports from services)
|
||||
export interface IUserService { ... }
|
||||
export interface IOrderService { ... }
|
||||
|
||||
// userService.ts
|
||||
import { IOrderService } from '../types/interfaces';
|
||||
export class UserService implements IUserService { ... }
|
||||
```
|
||||
|
||||
**Strategy 2: Dependency Injection**
|
||||
|
||||
```typescript
|
||||
// orderService.ts
|
||||
export class OrderService {
|
||||
constructor(private userService: IUserService) {}
|
||||
|
||||
// Instead of importing UserService directly
|
||||
}
|
||||
|
||||
// main.ts
|
||||
const userService = new UserService();
|
||||
const orderService = new OrderService(userService);
|
||||
```
|
||||
|
||||
**Strategy 3: Dynamic Imports**
|
||||
|
||||
```typescript
|
||||
// Only import when needed, not at module level
|
||||
async function processOrder() {
|
||||
const { UserService } = await import('./userService');
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
**Strategy 4: Use Type-Only Imports**
|
||||
|
||||
If you only need types (not values), use type-only imports:
|
||||
|
||||
```typescript
|
||||
// This doesn't create a runtime dependency
|
||||
import type { User } from './userService';
|
||||
```
|
||||
|
||||
**Strategy 5: Restructure Barrel Files**
|
||||
|
||||
Before (problematic):
|
||||
```typescript
|
||||
// components/index.ts
|
||||
export * from './Button';
|
||||
export * from './Modal'; // Modal imports Button from './index'
|
||||
```
|
||||
|
||||
After:
|
||||
```typescript
|
||||
// components/Modal.tsx
|
||||
import { Button } from './Button'; // Direct import, not from index
|
||||
```
|
||||
|
||||
### Step 4: Prevent Future Cycles
|
||||
|
||||
Add to your CI/build process:
|
||||
|
||||
```json
|
||||
// package.json
|
||||
{
|
||||
"scripts": {
|
||||
"check:circular": "madge --circular --extensions ts,tsx src/"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Or configure ESLint:
|
||||
|
||||
```javascript
|
||||
// .eslintrc.js
|
||||
module.exports = {
|
||||
plugins: ['import'],
|
||||
rules: {
|
||||
'import/no-cycle': ['error', { maxDepth: 10 }]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
1. Run `madge --circular src/` - should report no cycles
|
||||
2. Run your test suite - previously undefined imports should work
|
||||
3. Delete `node_modules` and reinstall - app should still work
|
||||
4. Build for production - no runtime errors
|
||||
|
||||
## Example
|
||||
|
||||
**Problem**: `OrderService` is undefined when imported in `UserService`
|
||||
|
||||
**Detection**:
|
||||
```bash
|
||||
$ madge --circular src/
|
||||
Circular dependencies found!
|
||||
src/services/userService.ts → src/services/orderService.ts → src/services/userService.ts
|
||||
```
|
||||
|
||||
**Fix**: Extract shared interface
|
||||
|
||||
```typescript
|
||||
// NEW: src/types/services.ts
|
||||
export interface IOrderService {
|
||||
createOrder(userId: string): Promise<Order>;
|
||||
}
|
||||
|
||||
// MODIFIED: src/services/userService.ts
|
||||
import type { IOrderService } from '../types/services';
|
||||
|
||||
export class UserService {
|
||||
constructor(private orderService: IOrderService) {}
|
||||
}
|
||||
|
||||
// MODIFIED: src/services/orderService.ts
|
||||
// No longer imports UserService
|
||||
export class OrderService implements IOrderService {
|
||||
async createOrder(userId: string): Promise<Order> { ... }
|
||||
}
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- TypeScript `import type` is your friend—it's erased at runtime and can't cause cycles
|
||||
- Barrel files (`index.ts`) are a common source of accidental cycles
|
||||
- The order of exports in a file can matter when there's a cycle
|
||||
- Jest/Vitest may handle module resolution differently than your bundler
|
||||
- Some bundlers (Webpack, Vite) have better cycle handling than others
|
||||
- `require()` can sometimes mask circular dependency issues that `import` exposes
|
||||
183
dot_claude/skills/claudeception/resources/research-references.md
Normal file
183
dot_claude/skills/claudeception/resources/research-references.md
Normal file
|
|
@ -0,0 +1,183 @@
|
|||
# Research References
|
||||
|
||||
This document compiles the academic research that informed the design of Claudeception.
|
||||
|
||||
## Core Papers
|
||||
|
||||
### Voyager: An Open-Ended Embodied Agent with Large Language Models
|
||||
|
||||
**Authors**: Wang, Xie, Jiang, Mandlekar, Xiao, Zhu, Fan, Anandkumar
|
||||
**Published**: May 2023
|
||||
**URL**: https://arxiv.org/abs/2305.16291
|
||||
|
||||
**Key Contribution**: First LLM-powered embodied lifelong learning agent with a skill library architecture.
|
||||
|
||||
**Relevant Concepts Applied**:
|
||||
|
||||
1. **Ever-Growing Skill Library**: Voyager maintains "an ever-growing skill library of executable code for storing and retrieving complex behaviors." This inspired our approach of extracting Claude Code skills as executable knowledge packages.
|
||||
|
||||
2. **Compositional Skills**: "The skills developed by Voyager are temporally extended, interpretable, and compositional, which compounds the agent's abilities rapidly and alleviates catastrophic forgetting." Our skill structure aims for similar composability.
|
||||
|
||||
3. **Self-Verification**: Voyager uses "self-verification for program improvement" before adding skills to the library. We implement similar quality gates before extraction.
|
||||
|
||||
4. **Iterative Prompting**: The "iterative prompting mechanism that incorporates environment feedback, execution errors" influenced our retrospective mode design.
|
||||
|
||||
---
|
||||
|
||||
### CASCADE: Cumulative Agentic Skill Creation through Autonomous Development and Evolution
|
||||
|
||||
**Authors**: [Research Team]
|
||||
**Published**: December 2025
|
||||
**URL**: https://arxiv.org/abs/2512.23880
|
||||
|
||||
**Key Contribution**: Self-evolving agentic framework demonstrating the transition from "LLM + tool use" to "LLM + skill acquisition."
|
||||
|
||||
**Relevant Concepts Applied**:
|
||||
|
||||
1. **Meta-Skills for Learning**: CASCADE demonstrates "continuous learning via web search and code extraction, and self-reflection via introspection." Our skill is itself a meta-skill for acquiring skills.
|
||||
|
||||
2. **Knowledge Codification**: "CASCADE accumulates executable skills that can be shared across agents" - this principle drives our skill extraction and storage approach.
|
||||
|
||||
3. **Memory Consolidation**: The framework uses memory consolidation to prevent forgetting and enable reuse. Our skill library serves a similar purpose.
|
||||
|
||||
---
|
||||
|
||||
### SEAgent: Self-Evolving Computer Use Agent with Autonomous Learning from Experience
|
||||
|
||||
**Authors**: Sun et al.
|
||||
**Published**: August 2025
|
||||
**URL**: https://arxiv.org/abs/2508.04700
|
||||
|
||||
**Key Contribution**: Framework enabling agents to autonomously evolve through interactions with unfamiliar software.
|
||||
|
||||
**Relevant Concepts Applied**:
|
||||
|
||||
1. **Experiential Learning**: "SEAgent empowers computer-use agents to autonomously master novel software environments via experiential learning, where agents explore new software, learn through iterative trial-and-error." Our retrospective mode captures this trial-and-error learning.
|
||||
|
||||
2. **Learning from Failures and Successes**: "The agent's policy is optimized through experiential learning from both failures and successes." We extract skills from both successful solutions and debugging processes.
|
||||
|
||||
3. **Curriculum Generation**: SEAgent uses a "Curriculum Generator" for increasingly diverse tasks. Our skill descriptions enable semantic matching to surface relevant skills.
|
||||
|
||||
---
|
||||
|
||||
### Reflexion: Language Agents with Verbal Reinforcement Learning
|
||||
|
||||
**Authors**: Shinn et al.
|
||||
**Published**: March 2023
|
||||
**URL**: https://arxiv.org/abs/2303.11366
|
||||
|
||||
**Key Contribution**: Framework for verbal reinforcement through linguistic feedback and self-reflection.
|
||||
|
||||
**Relevant Concepts Applied**:
|
||||
|
||||
1. **Self-Reflection Prompts**: "Reflexion converts feedback from the environment into linguistic feedback, also referred to as self-reflection." Our self-reflection prompts are directly inspired by this.
|
||||
|
||||
2. **Memory for Future Trials**: "These experiences (stored in long-term memory) are leveraged by the agent to rapidly improve decision-making." Skills serve as long-term memory.
|
||||
|
||||
3. **Verbal Reinforcement**: Instead of scalar rewards, Reflexion uses "nuanced feedback" in natural language. Our skill descriptions capture this nuanced knowledge.
|
||||
|
||||
---
|
||||
|
||||
### EvoFSM: Controllable Self-Evolution for Deep Research with Finite State Machines
|
||||
|
||||
**Authors**: [Research Team]
|
||||
**Published**: 2024
|
||||
|
||||
**Key Contribution**: Self-evolving framework with experience pools for continuous learning.
|
||||
|
||||
**Relevant Concepts Applied**:
|
||||
|
||||
1. **Self-Evolving Memory**: "EvoFSM integrates a Self-Evolving Memory mechanism, which distills successful strategies and failure patterns into an Experience Pool to enable continuous learning and warm-starting for future queries."
|
||||
|
||||
2. **Experience Pools**: The concept of storing strategies for later retrieval directly influenced our skill library design.
|
||||
|
||||
---
|
||||
|
||||
## Supporting Research
|
||||
|
||||
### Professional Agents: Evolving LLMs into Autonomous Experts
|
||||
|
||||
**URL**: https://arxiv.org/abs/2402.03628
|
||||
|
||||
Describes a framework for creating agents with specialized expertise through continuous learning. Influenced our quality criteria for what makes a skill worth extracting.
|
||||
|
||||
### Self-Reflection in LLM Agents: Effects on Problem-Solving Performance
|
||||
|
||||
**URL**: https://arxiv.org/abs/2405.06682
|
||||
|
||||
Empirical study showing self-reflection improves performance. Validated our use of reflection prompts for identifying extractable knowledge.
|
||||
|
||||
### Building Scalable and Reliable Agentic AI Systems
|
||||
|
||||
Comprehensive survey covering memory architectures, tool use, and continuous learning in agentic AI. Provided the broader architectural context for our design.
|
||||
|
||||
---
|
||||
|
||||
## Claude Code Skills Documentation
|
||||
|
||||
### Anthropic Engineering Blog: Equipping Agents for the Real World with Agent Skills
|
||||
|
||||
**URL**: https://www.anthropic.com/engineering/equipping-agents-for-the-real-world-with-agent-skills
|
||||
|
||||
**Key Insights**:
|
||||
|
||||
1. **Progressive Disclosure**: "Skills let Claude load information only as needed" - this enables scaling to many skills without context window bloat.
|
||||
|
||||
2. **Future Vision**: "We hope to enable agents to create, edit, and evaluate Skills on their own, letting them codify their own patterns of behavior into reusable capabilities." This skill is an implementation of this vision.
|
||||
|
||||
3. **Skill as Onboarding**: "Building a skill for an agent is like putting together an onboarding guide for a new hire." Our template follows this mental model.
|
||||
|
||||
### Claude Code Skills Documentation
|
||||
|
||||
**URL**: https://code.claude.com/docs/en/skills
|
||||
|
||||
**Key Insights**:
|
||||
|
||||
1. **SKILL.md Structure**: YAML frontmatter + markdown instructions
|
||||
2. **Description Importance**: Semantic matching relies on good descriptions
|
||||
3. **Allowed Tools**: Skills can restrict or enable specific tools
|
||||
4. **Location Options**: User-level vs. project-level installation
|
||||
|
||||
---
|
||||
|
||||
## Design Patterns Applied
|
||||
|
||||
### From Voyager
|
||||
- Skill library as executable code
|
||||
- Self-verification before adding to library
|
||||
- Compositional skill building
|
||||
|
||||
### From CASCADE
|
||||
- Meta-skills for learning
|
||||
- Knowledge codification into shareable format
|
||||
- Memory consolidation
|
||||
|
||||
### From SEAgent
|
||||
- Learning from both successes and failures
|
||||
- Experiential learning through trial-and-error
|
||||
- Progressive skill complexity
|
||||
|
||||
### From Reflexion
|
||||
- Self-reflection prompts
|
||||
- Verbal feedback over scalar rewards
|
||||
- Long-term memory storage
|
||||
|
||||
### From EvoFSM
|
||||
- Experience pools
|
||||
- Distilling strategies from sessions
|
||||
- Warm-starting future work
|
||||
|
||||
---
|
||||
|
||||
## Citation Format
|
||||
|
||||
If referencing this skill in academic work:
|
||||
|
||||
```
|
||||
@misc{claudeception,
|
||||
title={Claudeception: Autonomous Skill Extraction for LLM Agents},
|
||||
author={Claude Code},
|
||||
year={2024},
|
||||
note={Implements continuous learning patterns from Voyager, CASCADE, SEAgent, and Reflexion research}
|
||||
}
|
||||
```
|
||||
95
dot_claude/skills/claudeception/resources/skill-template.md
Normal file
95
dot_claude/skills/claudeception/resources/skill-template.md
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
---
|
||||
name: [descriptive-kebab-case-name]
|
||||
description: |
|
||||
[REQUIRED: Precise description that enables semantic matching. Include:
|
||||
(1) What problem this solves
|
||||
(2) Specific trigger conditions - exact error messages, symptoms, scenarios
|
||||
(3) Key technologies/frameworks involved
|
||||
Use phrases like "Use when:", "Helps with:", "Solves:"]
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: YYYY-MM-DD
|
||||
---
|
||||
|
||||
# [Skill Name - Human Readable Title]
|
||||
|
||||
## Problem
|
||||
|
||||
[Clear, concise description of the problem this skill addresses.
|
||||
What pain point does this solve? Why is it non-obvious?]
|
||||
|
||||
## Context / Trigger Conditions
|
||||
|
||||
[When should this skill be activated? Be specific:]
|
||||
|
||||
- [Exact error message 1]
|
||||
- [Exact error message 2]
|
||||
- [Observable symptom or behavior]
|
||||
- [Environmental condition (framework, tool, platform)]
|
||||
|
||||
## Solution
|
||||
|
||||
[Step-by-step instructions to resolve the problem]
|
||||
|
||||
### Step 1: [First Action]
|
||||
|
||||
[Detailed instructions with code examples if applicable]
|
||||
|
||||
```language
|
||||
// Example code
|
||||
```
|
||||
|
||||
### Step 2: [Second Action]
|
||||
|
||||
[Continue with clear, actionable steps]
|
||||
|
||||
### Step 3: [Third Action]
|
||||
|
||||
[Include alternatives or variations if relevant]
|
||||
|
||||
## Verification
|
||||
|
||||
[How to confirm the solution worked:]
|
||||
|
||||
1. [Verification step 1]
|
||||
2. [Verification step 2]
|
||||
3. [Expected outcome]
|
||||
|
||||
## Example
|
||||
|
||||
**Scenario**: [Concrete example of when this skill applies]
|
||||
|
||||
**Before**:
|
||||
```
|
||||
[Error message or problematic code]
|
||||
```
|
||||
|
||||
**After**:
|
||||
```
|
||||
[Fixed code or successful output]
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
[Important caveats, edge cases, and related considerations:]
|
||||
|
||||
- [Caveat 1]
|
||||
- [Related skill or documentation link]
|
||||
- [Known limitation]
|
||||
- [When NOT to use this skill]
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Extraction Checklist (remove before saving):
|
||||
- [ ] Name is descriptive and uses kebab-case
|
||||
- [ ] Description includes specific error messages/symptoms
|
||||
- [ ] Problem is clearly stated
|
||||
- [ ] Trigger conditions are specific and searchable
|
||||
- [ ] Solution is step-by-step and actionable
|
||||
- [ ] Code examples are complete and tested
|
||||
- [ ] Verification steps are included
|
||||
- [ ] Example is concrete and realistic
|
||||
- [ ] Notes cover edge cases and caveats
|
||||
- [ ] No sensitive information (credentials, internal URLs)
|
||||
-->
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
#!/usr/bin/env bash
#
# Claudeception Auto-Activation Hook
# This hook ensures the claudeception skill evaluates every interaction
# for extractable knowledge worth preserving.
#
# Installation:
#   1. Copy this script to ~/.claude/hooks/
#   2. Make it executable: chmod +x ~/.claude/hooks/claudeception-activator.sh
#   3. Add to ~/.claude/settings.json (see README for details)
#
# Outputs: the activation banner below on stdout; Claude Code injects
# hook stdout into the conversation context.

# Fail fast on errors, unset variables, and failed pipeline stages.
set -euo pipefail

# Quoted 'EOF' delimiter: banner is emitted literally, with no variable
# or command expansion.
cat << 'EOF'
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🧠 MANDATORY SKILL EVALUATION REQUIRED
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

CRITICAL: After completing this user request, you MUST evaluate whether
it produced extractable knowledge using the claudeception skill.

EVALUATION PROTOCOL (NON-NEGOTIABLE):

1. COMPLETE the user's request first
2. EVALUATE: Ask yourself:
- Did this require non-obvious investigation or debugging?
- Was the solution something that would help in future similar situations?
- Did I discover something not immediately obvious from documentation?

3. IF YES to any question above:
ACTIVATE: Use Skill(claudeception) NOW to extract the knowledge

4. IF NO to all questions:
SKIP: No skill extraction needed

This is NOT optional. Failing to evaluate means valuable knowledge is lost.
The claudeception skill will decide whether to actually create a new
skill based on its quality criteria.

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
EOF
|
||||
122
dot_claude/skills/kubernetes-latest-tag-image-pull/SKILL.md
Normal file
122
dot_claude/skills/kubernetes-latest-tag-image-pull/SKILL.md
Normal file
|
|
@ -0,0 +1,122 @@
|
|||
---
|
||||
name: kubernetes-latest-tag-image-pull
|
||||
description: |
|
||||
Fix for Kubernetes pods not pulling new Docker images with :latest tag after push.
|
||||
Use when: (1) pushed a new image with :latest tag but pods still run old code,
|
||||
(2) kubectl rollout restart doesn't pick up new image, (3) pod logs show old
|
||||
behavior despite image push confirmation. Covers debugging techniques and best
|
||||
practices for image versioning in Kubernetes deployments.
|
||||
author: Claude Code
|
||||
version: 1.1.0
|
||||
date: 2026-02-01
|
||||
---
|
||||
|
||||
# Kubernetes :latest Tag Image Pull Issues
|
||||
|
||||
## Problem
|
||||
After pushing a new Docker image with the `:latest` tag, Kubernetes pods continue
|
||||
running the old image version. Rolling restarts and pod deletions don't help because
|
||||
the node has the image cached.
|
||||
|
||||
## Context / Trigger Conditions
|
||||
- Pushed new image to registry with same `:latest` tag
|
||||
- `docker push` shows successful push with new digest
|
||||
- `kubectl rollout restart deployment/<name>` completes
|
||||
- Pod logs still show old application behavior
|
||||
- New pods created but running old code
|
||||
|
||||
## Solution
|
||||
|
||||
### Recommended: Use Specific Image Tags (Best Practice)
|
||||
|
||||
The proper solution is to use unique tags for each build instead of `:latest`:
|
||||
|
||||
```hcl
|
||||
# Use git SHA, version number, or timestamp
|
||||
container {
|
||||
name = "my-app"
|
||||
image = "myregistry/myapp:v1.2.3" # or :abc123 (git SHA)
|
||||
}
|
||||
```
|
||||
|
||||
This ensures Kubernetes always pulls the exact version you deployed.
|
||||
|
||||
### Quick Fix for Debugging: Temporary imagePullPolicy
|
||||
|
||||
**Only use this during active debugging, then remove it:**
|
||||
|
||||
```hcl
|
||||
container {
|
||||
name = "my-app"
|
||||
image = "myregistry/myapp:latest"
|
||||
image_pull_policy = "Always" # TEMPORARY - remove after debugging
|
||||
}
|
||||
```
|
||||
|
||||
**Why not leave it on permanently:**
|
||||
- Slows down pod startup (always pulls even when unchanged)
|
||||
- Increases registry bandwidth and costs
|
||||
- Deployment fails if registry is temporarily unavailable
|
||||
- Hides the real problem (using `:latest` in production)
|
||||
|
||||
### Alternative: Force Pull Without Config Change
|
||||
|
||||
Delete the pod so that Kubernetes performs a fresh image pull when the replacement pod is scheduled:
|
||||
|
||||
```bash
|
||||
# Delete pods to force fresh pull
|
||||
kubectl -n <namespace> delete pod -l app=<app-name>
|
||||
|
||||
# Or scale down and up
|
||||
kubectl -n <namespace> scale deployment <name> --replicas=0
|
||||
kubectl -n <namespace> scale deployment <name> --replicas=1
|
||||
```
|
||||
|
||||
## Verification
|
||||
```bash
|
||||
# Check pod is running new image
|
||||
kubectl -n <namespace> describe pod <pod-name> | grep "Image:"
|
||||
|
||||
# Check logs show new behavior
|
||||
kubectl -n <namespace> logs deployment/<deployment-name> --tail=20
|
||||
|
||||
# Verify image digest matches what was pushed
|
||||
kubectl -n <namespace> get pods -o jsonpath='{.items[*].status.containerStatuses[*].imageID}'
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
**Scenario**: API pod still running `./start.sh` after pushing image with `uvicorn` CMD
|
||||
|
||||
**Best approach** - Use versioned tags:
|
||||
```hcl
|
||||
container {
|
||||
name = "realestate-crawler-api"
|
||||
image = "viktorbarzin/realestatecrawler:v1.2.0" # Specific version
|
||||
}
|
||||
```
|
||||
|
||||
**Quick debug approach** - Temporary imagePullPolicy (remove after fixing):
|
||||
```hcl
|
||||
container {
|
||||
name = "realestate-crawler-api"
|
||||
image = "viktorbarzin/realestatecrawler:latest"
|
||||
image_pull_policy = "Always" # TEMPORARY
|
||||
}
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- **Default behavior**: When `imagePullPolicy` is omitted, Kubernetes defaults it to
  `Always` for the `:latest` tag (and to `IfNotPresent` for any other tag). If the policy
  was explicitly set to `IfNotPresent`, the node won't pull as long as any image with
  that tag exists in its local cache — which is how stale `:latest` images keep running
|
||||
- **Best practice**: Use specific version tags (e.g., `v1.2.3`, git SHA, or build number)
|
||||
for all deployments, especially production
|
||||
- **CI/CD integration**: Have your pipeline tag images with git SHA or build ID automatically
|
||||
- **imagePullPolicy: Always is a debugging tool, not a solution** - it masks the underlying
|
||||
problem of using mutable tags
|
||||
- **Node caching**: Even deleting pods doesn't clear the node's image cache, but it can
|
||||
trigger a re-pull if the image was garbage collected
|
||||
|
||||
## References
|
||||
- [Kubernetes: Updating Images](https://kubernetes.io/docs/concepts/containers/images/#updating-images)
|
||||
- [Kubernetes imagePullPolicy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy)
|
||||
152
dot_claude/skills/react-hooks-order-early-return/SKILL.md
Normal file
152
dot_claude/skills/react-hooks-order-early-return/SKILL.md
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
---
|
||||
name: react-hooks-order-early-return
|
||||
description: |
|
||||
Fix for "Rendered more hooks than during the previous render" or "React has detected
|
||||
a change in the order of Hooks called by Component" errors. Use when: (1) useEffect
|
||||
is placed after an early return statement, (2) conditional rendering causes different
|
||||
hook counts between renders, (3) hooks are called inside conditions or loops.
|
||||
Covers React Rules of Hooks violations and proper hook ordering.
|
||||
author: Claude Code
|
||||
version: 1.0.0
|
||||
date: 2026-02-01
|
||||
---
|
||||
|
||||
# React Hooks Order Violation with Early Returns
|
||||
|
||||
## Problem
|
||||
React throws "Rendered more hooks than during the previous render" or similar hook
|
||||
ordering errors when hooks are called conditionally or after early return statements.
|
||||
|
||||
## Context / Trigger Conditions
|
||||
- Error: "React has detected a change in the order of Hooks called by [Component]"
|
||||
- Error: "Rendered more hooks than during the previous render"
|
||||
- Component has an early return (e.g., `if (!user) return <LoginModal />`)
|
||||
- Hooks (useState, useEffect, useCallback, etc.) are defined after the early return
|
||||
- Error appears when state changes cause the early return to be skipped
|
||||
|
||||
## Solution
|
||||
|
||||
### Rule: All hooks must be called before any early returns
|
||||
|
||||
**Wrong** (hooks after early return):
|
||||
```tsx
|
||||
function App() {
|
||||
const [user, setUser] = useState(null);
|
||||
|
||||
// Early return
|
||||
if (!user) {
|
||||
return <LoginModal />;
|
||||
}
|
||||
|
||||
// BUG: This useEffect only runs when user exists
|
||||
// causing different hook count between renders
|
||||
useEffect(() => {
|
||||
loadUserData();
|
||||
}, []);
|
||||
|
||||
return <Dashboard user={user} />;
|
||||
}
|
||||
```
|
||||
|
||||
**Correct** (all hooks before early return):
|
||||
```tsx
|
||||
function App() {
|
||||
const [user, setUser] = useState(null);
|
||||
|
||||
// All hooks must be called unconditionally
|
||||
useEffect(() => {
|
||||
if (user) {
|
||||
loadUserData();
|
||||
}
|
||||
}, [user]);
|
||||
|
||||
// Early return after all hooks
|
||||
if (!user) {
|
||||
return <LoginModal />;
|
||||
}
|
||||
|
||||
return <Dashboard user={user} />;
|
||||
}
|
||||
```
|
||||
|
||||
### For Complex Logic: Use useCallback
|
||||
|
||||
When you need to call a function from multiple places (including effects):
|
||||
|
||||
```tsx
|
||||
function App() {
|
||||
const [user, setUser] = useState(null);
|
||||
const [data, setData] = useState(null);
|
||||
|
||||
// Define callback BEFORE early return
|
||||
const loadData = useCallback(async () => {
|
||||
if (!user) return;
|
||||
const result = await fetchData(user);
|
||||
setData(result);
|
||||
}, [user]);
|
||||
|
||||
// Effect uses the callback
|
||||
useEffect(() => {
|
||||
loadData();
|
||||
}, [user, loadData]);
|
||||
|
||||
// Early return AFTER all hooks
|
||||
if (!user) {
|
||||
return <LoginModal />;
|
||||
}
|
||||
|
||||
return <Dashboard data={data} />;
|
||||
}
|
||||
```
|
||||
|
||||
## Verification
|
||||
1. Component renders without hook errors
|
||||
2. State changes that affect early return don't cause crashes
|
||||
3. React DevTools shows consistent hook order
|
||||
|
||||
## Example
|
||||
|
||||
**Scenario**: Auto-loading data when user authenticates
|
||||
|
||||
**Before** (broken):
|
||||
```tsx
|
||||
if (!user) {
|
||||
return <LoginModal />;
|
||||
}
|
||||
|
||||
// useEffect after early return - BREAKS
|
||||
useEffect(() => {
|
||||
onSubmit('visualize', defaultParams);
|
||||
}, []);
|
||||
```
|
||||
|
||||
**After** (fixed):
|
||||
```tsx
|
||||
// useCallback defined before early return
|
||||
const loadListings = useCallback(async (params) => {
|
||||
if (!user) return;
|
||||
// ... loading logic
|
||||
}, [user]);
|
||||
|
||||
// useEffect before early return, with user check inside
|
||||
useEffect(() => {
|
||||
if (!user) return;
|
||||
loadListings(defaultParams);
|
||||
}, [user, loadListings]);
|
||||
|
||||
// Early return AFTER all hooks
|
||||
if (!user) {
|
||||
return <LoginModal />;
|
||||
}
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- **Rules of Hooks**: Hooks must be called in the same order on every render
|
||||
- **No conditional hooks**: Never put hooks inside if statements, loops, or after returns
|
||||
- **Linting**: ESLint plugin `eslint-plugin-react-hooks` catches most violations
|
||||
- **The fix pattern**: Move hook definitions before early returns, add conditions inside hooks
|
||||
|
||||
## References
|
||||
- [React: Rules of Hooks](https://react.dev/reference/rules/rules-of-hooks)
|
||||
- [React: Reusing Logic with Custom Hooks](https://react.dev/learn/reusing-logic-with-custom-hooks)
|
||||
Loading…
Add table
Add a link
Reference in a new issue