move memory hooks to ~/.claude/hooks for sandbox compatibility

Viktor Barzin 2026-03-15 11:08:52 +00:00
parent f9c45b74de
commit 4695ac9121
6 changed files with 727 additions and 5 deletions


@@ -0,0 +1,69 @@
#!/usr/bin/env python3
"""
Auto-allow hook for claude-memory plugin tools.
This PermissionRequest hook automatically allows any tool whose name matches
the claude_memory MCP server pattern to proceed without user confirmation.
Environment variables:
DEBUG_CLAUDE_MEMORY_HOOKS=1 Enable debug logging to stderr
DISABLE_CLAUDE_MEMORY_AUTO_APPROVE=1 Disable auto-approve (for debugging)
"""
import json
import os
import re
import sys
DEBUG = os.environ.get("DEBUG_CLAUDE_MEMORY_HOOKS", "").lower() in ("1", "true", "yes")
DISABLED = os.environ.get("DISABLE_CLAUDE_MEMORY_AUTO_APPROVE", "").lower() in (
"1",
"true",
"yes",
)
# Match any tool from this plugin's MCP server, resilient to slug variations
# e.g. mcp__plugin_claude-memory_claude_memory__memory_store
# mcp__claude_memory__memory_recall
TOOL_PATTERN = re.compile(r"mcp__.*claude_memory__(?:memory_|secret_)")
def debug(msg: str) -> None:
"""Print debug message to stderr if DEBUG is enabled."""
if DEBUG:
print(f"[claude-memory] {msg}", file=sys.stderr)
def main() -> None:
if DISABLED:
debug("Auto-approve disabled via DISABLE_CLAUDE_MEMORY_AUTO_APPROVE")
sys.exit(0)
try:
input_data = json.load(sys.stdin)
except json.JSONDecodeError as e:
print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
sys.exit(1)
tool_name = input_data.get("tool_name", "")
debug(f"Permission request for: {tool_name}")
if TOOL_PATTERN.search(tool_name):
debug(f"Auto-allowing: {tool_name}")
output = {
"hookSpecificOutput": {
"hookEventName": "PermissionRequest",
"decision": {
"behavior": "allow",
},
}
}
json.dump(output, sys.stdout)
else:
debug(f"Not a claude-memory tool, passing through: {tool_name}")
sys.exit(0)
if __name__ == "__main__":
main()
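
To sanity-check the hook outside Claude Code, pipe it a payload by hand. A minimal sketch, assuming the script lives at ~/.claude/hooks/auto-allow-memory-tools.py and that the permission payload carries a top-level "tool_name" field (the only field the hook reads):

import json
import subprocess
from pathlib import Path

# Hypothetical local test: a claude-memory tool should be auto-allowed.
hook = Path.home() / ".claude" / "hooks" / "auto-allow-memory-tools.py"
payload = {"tool_name": "mcp__claude_memory__memory_recall"}
result = subprocess.run(
    ["python3", str(hook)],
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)
# A match prints the allow decision; a non-match prints nothing (pass-through).
print(result.stdout or "(passed through)")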


@@ -0,0 +1,458 @@
#!/usr/bin/env python3
"""
Stop hook (async): automatic learning extraction via haiku-as-judge.
After each Claude response, reads the recent conversation window and uses
haiku to detect learnings worth persisting:
- User corrections, preferences, decisions, facts (original scope)
- Debugging insights: error → root cause → fix mappings
- Architectural patterns and workarounds discovered during work
- Service/tool-specific operational knowledge
Features:
- Multi-turn context window (last 5 exchanges by default)
- State tracking to avoid duplicate extraction
- Writes to memory API/SQLite AND auto-memory markdown files
- Throttled deep extraction: full window every ~5 turns, single-turn otherwise
Runs with async: true, so it does NOT block the user.
"""
import hashlib
import io
import json
import logging
import os
import shutil
import subprocess
import sys
import urllib.error
import urllib.request
from datetime import datetime, timezone
from pathlib import Path
logger = logging.getLogger(__name__)
API_BASE_URL = os.environ.get("MEMORY_API_URL") or os.environ.get("CLAUDE_MEMORY_API_URL", "")
API_KEY = os.environ.get("MEMORY_API_KEY") or os.environ.get("CLAUDE_MEMORY_API_KEY", "")
# How many turns between deep (multi-turn) extractions
DEEP_EXTRACTION_INTERVAL = 5
# Max exchanges to include in deep extraction
DEEP_WINDOW_SIZE = 5
# Max chars per message in the context window
MAX_MSG_CHARS = 3000
# State directory
STATE_DIR = Path.home() / ".claude" / "auto-learn-state"
SINGLE_TURN_PROMPT = """You are a memory extraction judge. Analyze this single exchange between a user and an AI assistant.
USER MESSAGE:
{user_message}
ASSISTANT RESPONSE:
{assistant_response}
Your job: determine if any of these learning events occurred:
1. USER CORRECTION: user corrected the assistant's mistake or misunderstanding
2. PREFERENCE: user stated a preference, habit, or "I like/prefer/want" statement
3. DECISION: a decision was reached about how to do something
4. FACT: user shared a durable fact about themselves, their team, tools, or environment
If ANY learning event occurred, return JSON:
{{"events": [{{"type": "correction|preference|decision|fact", "content": "concise fact to remember (one sentence)", "importance": 0.7, "tags": "comma,separated,tags", "expanded_keywords": "space-separated semantically related search terms for recall (minimum 5 words)", "supersedes": null}}]}}
If NO learning event occurred, return:
{{"events": []}}
Rules:
- Only extract DURABLE facts, not transient task details ("fix this file", "run tests")
- Corrections are highest value (0.8-0.9)
- Be conservative: false negatives are better than false positives
- "supersedes" should be a search query to find the old outdated memory, or null
- Return ONLY valid JSON, no other text"""
DEEP_EXTRACTION_PROMPT = """You are a knowledge extraction system. Analyze this multi-turn conversation between a user and an AI assistant working on software engineering tasks.
CONVERSATION (last {n_exchanges} exchanges):
{conversation}
Extract any DURABLE knowledge worth remembering across sessions. Look for:
1. **CORRECTIONS**: user corrected a mistake or misunderstanding (importance: 0.8-0.9)
2. **PREFERENCES**: user stated how they like things done (importance: 0.7-0.8)
3. **DECISIONS**: architectural or design decisions reached (importance: 0.7-0.8)
4. **FACTS**: durable facts about user, team, tools, environment (importance: 0.6-0.8)
5. **DEBUGGING INSIGHTS**: error → root cause → fix patterns that would help next time (importance: 0.7-0.9)
6. **WORKAROUNDS**: things that didn't work and what did instead (importance: 0.7-0.8)
7. **OPERATIONAL KNOWLEDGE**: service-specific learnings, config gotchas, resource requirements (importance: 0.7-0.8)
Return JSON:
{{"events": [{{"type": "correction|preference|decision|fact|debugging|workaround|operational", "content": "concise knowledge to remember (1-3 sentences max)", "importance": 0.7, "tags": "comma,separated,relevant,tags", "expanded_keywords": "space-separated semantically related search terms for recall (minimum 5 words)", "supersedes": null}}]}}
If NO durable knowledge was found, return:
{{"events": []}}
Rules:
- Only extract DURABLE knowledge, not transient task context ("reading file X", "running command Y")
- Don't extract things that are obvious from the codebase (file paths, function names)
- DO extract: "X doesn't work because Y — use Z instead", "service A needs B config", "always do X before Y"
- Merge related learnings into single events rather than splitting into tiny fragments
- If a debugging session revealed the root cause of an issue, capture the error → cause → fix chain
- "supersedes" should be a search query to find an old outdated memory this replaces, or null
- Maximum 5 events per extraction; prioritize by importance
- Return ONLY valid JSON, no other text"""
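# For reference, an illustrative judge response in the shape that _call_judge()
# below parses (hypothetical values, not from a real session):
#
#   {"events": [{"type": "debugging",
#                "content": "nginx 502s came from a 1s upstream timeout; raising it to 30s fixed them",
#                "importance": 0.8,
#                "tags": "nginx,timeout,debugging",
#                "expanded_keywords": "nginx 502 bad gateway upstream proxy timeout",
#                "supersedes": null}]}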
def _get_state_path(session_id: str) -> Path:
"""Get state file path for this session."""
STATE_DIR.mkdir(parents=True, exist_ok=True)
return STATE_DIR / f"{session_id}.json"
def _load_state(session_id: str) -> dict:
"""Load extraction state for this session."""
path = _get_state_path(session_id)
if path.exists():
try:
return json.loads(path.read_text())
except (json.JSONDecodeError, OSError):
pass
return {"turn_count": 0, "extracted_hashes": [], "last_deep_turn": 0}
def _save_state(session_id: str, state: dict) -> None:
"""Save extraction state for this session."""
path = _get_state_path(session_id)
try:
path.write_text(json.dumps(state))
except OSError:
pass
def _cleanup_old_state() -> None:
"""Remove state files older than 24 hours."""
if not STATE_DIR.exists():
return
now = datetime.now().timestamp()
try:
for f in STATE_DIR.iterdir():
if f.suffix == ".json" and (now - f.stat().st_mtime) > 86400:
f.unlink(missing_ok=True)
except OSError:
pass
def _content_hash(content: str) -> str:
"""Hash content for deduplication."""
return hashlib.sha256(content.encode()).hexdigest()[:16]
def _parse_transcript(transcript_path: str, max_exchanges: int = 1) -> list[dict]:
"""
Parse the transcript and return the last N exchanges as
[{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}, ...]
"""
try:
MAX_TAIL_BYTES = max_exchanges * 100_000 # ~100KB per exchange should be plenty
with open(transcript_path, "rb") as f:
f.seek(0, io.SEEK_END)
size = f.tell()
f.seek(max(0, size - MAX_TAIL_BYTES))
tail = f.read().decode("utf-8", errors="replace")
lines = tail.split("\n")
except Exception:
return []
entries = []
for line in lines:
line = line.strip()
if not line:
continue
try:
entry = json.loads(line)
except json.JSONDecodeError:
continue
# Transcript format: role can be at top level or nested in message
msg = entry.get("message", entry)
role = msg.get("role", "") or entry.get("type", "")
if role not in ("user", "assistant"):
continue
content = msg.get("content", "")
if isinstance(content, list):
content = " ".join(
b.get("text", "") for b in content
if isinstance(b, dict) and b.get("type") == "text"
)
content = str(content)[:MAX_MSG_CHARS]
if content.strip():
entries.append({"role": role, "content": content})
# Extract the last N exchanges (user+assistant pairs)
# Walk backwards to find pairs
exchanges = []
i = len(entries) - 1
while i >= 0 and len(exchanges) < max_exchanges * 2:
exchanges.insert(0, entries[i])
i -= 1
# Trim to last N complete exchanges
result = []
pair_count = 0
for entry in reversed(exchanges):
result.insert(0, entry)
if entry["role"] == "user":
pair_count += 1
if pair_count >= max_exchanges:
break
return result
def _api_request(method: str, path: str, body: dict | None = None) -> dict:
url = f"{API_BASE_URL}{path}"
data = json.dumps(body).encode() if body else None
req = urllib.request.Request(
url, data=data, method=method,
headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"},
)
with urllib.request.urlopen(req, timeout=15) as resp:
return json.loads(resp.read().decode())
def _store_via_api(content, category, tags, importance, expanded_keywords):
_api_request("POST", "/api/memories", {
"content": content, "category": category, "tags": tags,
"expanded_keywords": expanded_keywords, "importance": importance,
})
def _store_via_sqlite(content, category, tags, importance, expanded_keywords):
import sqlite3
memory_home = os.environ.get("MEMORY_HOME", os.path.expanduser("~/.claude/claude-memory"))
db_path = os.path.join(memory_home, "memory", "memory.db")
if not os.path.exists(db_path):
legacy_db = os.path.join(os.path.expanduser("~/.claude/metaclaw"), "memory", "memory.db")
if os.path.exists(legacy_db):
db_path = legacy_db
conn = sqlite3.connect(db_path, timeout=10.0)
conn.execute("PRAGMA journal_mode=WAL")
now = datetime.now(timezone.utc).isoformat()
conn.execute(
"INSERT INTO memories (content, category, tags, importance, expanded_keywords, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?)",
(content, category, tags, importance, expanded_keywords, now, now),
)
conn.commit()
conn.close()
def _append_to_auto_memory(content: str, event_type: str) -> None:
"""Append a learning to the auto-memory markdown file for the current project."""
# Find the project memory directory based on CWD
cwd = os.getcwd()
# Claude Code stores project memory at ~/.claude/projects/<escaped-path>/memory/
escaped = cwd.replace("/", "-")
if escaped.startswith("-"):
escaped = escaped[1:] # Remove leading dash
memory_dir = Path.home() / ".claude" / "projects" / f"-{escaped}" / "memory"
if not memory_dir.exists():
# Try without the leading dash
memory_dir = Path.home() / ".claude" / "projects" / escaped / "memory"
if not memory_dir.exists():
return
auto_learn_file = memory_dir / "auto-learned.md"
now = datetime.now(timezone.utc).strftime("%Y-%m-%d")
header = "# Auto-Learned Knowledge\n\nAutomatically extracted by the auto-learn hook. Review periodically and promote valuable entries to MEMORY.md.\n\n"
if not auto_learn_file.exists():
auto_learn_file.write_text(header)
# Append the new learning
with open(auto_learn_file, "a") as f:
f.write(f"- [{now}] **{event_type}**: {content}\n")
def _call_judge(prompt: str) -> list[dict]:
"""Call haiku as judge and return extracted events."""
try:
result = subprocess.run(
["claude", "-p", prompt, "--model", "haiku"],
capture_output=True, text=True, timeout=45,
env={**os.environ, "CLAUDECODE": ""},
)
if result.returncode != 0:
return []
response_text = result.stdout.strip()
# Strip markdown code fences if present
if response_text.startswith("```"):
lines = response_text.split("\n")
lines = [l for l in lines if not l.strip().startswith("```")]
response_text = "\n".join(lines).strip()
judge_result = json.loads(response_text)
return judge_result.get("events", [])
except (subprocess.TimeoutExpired, json.JSONDecodeError, OSError):
return []
def _format_conversation(entries: list[dict]) -> str:
"""Format conversation entries for the judge prompt."""
parts = []
for entry in entries:
role = "USER" if entry["role"] == "user" else "ASSISTANT"
parts.append(f"[{role}]: {entry['content']}")
return "\n\n".join(parts)
def _store_events(events: list[dict], extracted_hashes: list[str]) -> list[str]:
"""Store extracted events, return new hashes."""
category_map = {
"correction": "preferences",
"preference": "preferences",
"decision": "decisions",
"fact": "facts",
"debugging": "decisions",
"workaround": "decisions",
"operational": "facts",
}
new_hashes = []
for event in events:
content = event.get("content", "")
if not content:
continue
# Deduplication: skip if we've already extracted this
h = _content_hash(content)
if h in extracted_hashes:
continue
event_type = event.get("type", "fact")
importance = max(0.0, min(1.0, float(event.get("importance", 0.7))))
category = category_map.get(event_type, "facts")
tags = event.get("tags", f"auto-learned,{event_type}")
if "auto-learned" not in tags:
tags = f"auto-learned,{tags}"
expanded_keywords = event.get("expanded_keywords", "")
# Store to memory API or SQLite
try:
if API_KEY and API_BASE_URL:
_store_via_api(content, category, tags, importance, expanded_keywords)
else:
_store_via_sqlite(content, category, tags, importance, expanded_keywords)
except Exception:
pass
# Also append to auto-memory markdown
try:
_append_to_auto_memory(content, event_type)
except Exception:
pass
new_hashes.append(h)
return new_hashes
def main() -> None:
if not shutil.which("claude"):
return
try:
hook_input = json.load(sys.stdin)
except (json.JSONDecodeError, EOFError):
return
if isinstance(hook_input, dict) and hook_input.get("stop_hook_active", False):
return
transcript_path = ""
session_id = ""
if isinstance(hook_input, dict):
transcript_path = hook_input.get("transcript_path", "")
session_id = hook_input.get("session_id", "")
if not transcript_path or not os.path.exists(transcript_path):
return
# Derive session ID from transcript path if not provided
if not session_id:
session_id = hashlib.sha256(transcript_path.encode()).hexdigest()[:16]
# Load state
state = _load_state(session_id)
state["turn_count"] = state.get("turn_count", 0) + 1
turn_count = state["turn_count"]
last_deep_turn = state.get("last_deep_turn", 0)
extracted_hashes = state.get("extracted_hashes", [])
# Decide: single-turn (cheap) or deep (multi-turn) extraction
turns_since_deep = turn_count - last_deep_turn
do_deep = turns_since_deep >= DEEP_EXTRACTION_INTERVAL
if do_deep:
# Deep extraction: read last N exchanges
entries = _parse_transcript(transcript_path, max_exchanges=DEEP_WINDOW_SIZE)
if len(entries) < 2:
_save_state(session_id, state)
return
# Count actual exchanges
n_exchanges = sum(1 for e in entries if e["role"] == "user")
conversation = _format_conversation(entries)
prompt = DEEP_EXTRACTION_PROMPT.format(
n_exchanges=n_exchanges,
conversation=conversation[:8000], # Cap total context
)
events = _call_judge(prompt)
state["last_deep_turn"] = turn_count
else:
# Single-turn extraction: just the last exchange
entries = _parse_transcript(transcript_path, max_exchanges=1)
if len(entries) < 2:
_save_state(session_id, state)
return
user_msg = ""
assistant_msg = ""
for entry in entries:
if entry["role"] == "user":
user_msg = entry["content"]
elif entry["role"] == "assistant":
assistant_msg = entry["content"]
if not user_msg or len(user_msg.strip()) < 10:
_save_state(session_id, state)
return
prompt = SINGLE_TURN_PROMPT.format(
user_message=user_msg,
assistant_response=assistant_msg[:2000],
)
events = _call_judge(prompt)
# Store events
if events:
new_hashes = _store_events(events, extracted_hashes)
extracted_hashes.extend(new_hashes)
# Keep hash list bounded
if len(extracted_hashes) > 200:
extracted_hashes = extracted_hashes[-200:]
state["extracted_hashes"] = extracted_hashes
_save_state(session_id, state)
# Periodic cleanup of old state files
if turn_count % 20 == 0:
_cleanup_old_state()
if __name__ == "__main__":
main()
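
Because the hook is driven entirely by JSON on stdin, it can be exercised without Claude Code (the claude CLI must still be on PATH for the judge call). A rough sketch with a hypothetical transcript path, using the same input fields the hook reads:

import json
import subprocess
from pathlib import Path

# Two-line JSONL transcript in the format _parse_transcript() expects.
transcript = Path("/tmp/test-transcript.jsonl")
transcript.write_text(
    json.dumps({"message": {"role": "user", "content": "I prefer rebase over merge commits"}}) + "\n"
    + json.dumps({"message": {"role": "assistant", "content": "Noted, I'll rebase going forward."}}) + "\n"
)
hook_input = {
    "session_id": "local-test",
    "transcript_path": str(transcript),
    "stop_hook_active": False,
}
subprocess.run(
    ["python3", str(Path.home() / ".claude" / "hooks" / "auto-learn.py")],
    input=json.dumps(hook_input),
    text=True,
)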


@@ -0,0 +1,64 @@
#!/bin/bash
# UserPromptSubmit hook: Inject recovery context after compaction
# This hook runs on each user prompt, but only injects context once after compaction.
# Read hook input from stdin
INPUT=$(cat)
# Extract session ID
SESSION_ID=$(echo "$INPUT" | jq -r '.session_id // .sessionId // "unknown"')
# Define marker path
MEMORY_HOME="${MEMORY_HOME:-$HOME/.claude/claude-memory}"
MARKER_DIR="${MEMORY_HOME}/state/compaction-markers"
MARKER_FILE="${MARKER_DIR}/${SESSION_ID}.json"
# Fast path: no marker means no recent compaction, exit immediately
if [ ! -f "$MARKER_FILE" ]; then
exit 0
fi
# Read marker contents
MARKER=$(cat "$MARKER_FILE")
# Validate JSON before processing
if ! echo "$MARKER" | jq -e . >/dev/null 2>&1; then
rm -f "$MARKER_FILE"
exit 0
fi
# Extract data from marker
COMPACTED_AT=$(echo "$MARKER" | jq -r '.compactedAt // "unknown"')
PERSONALITY=$(echo "$MARKER" | jq -r '.personalityReminder // ""')
# Build remembered facts summary (first 10 facts, one bullet each)
FACTS_SUMMARY=$(echo "$MARKER" | jq -r '
.rememberedFacts[:10] |
map("- [\(.category // "fact")] \(.content)") |
join("\n")
' 2>/dev/null || echo "")
# Build recovery context (kept under 1000 tokens)
RECOVERY_CONTEXT="[Claude Memory Recovery - Context compacted at ${COMPACTED_AT}]
${PERSONALITY}
Key memories from before compaction:
${FACTS_SUMMARY}
Use the memory_recall MCP tool if you need more context about past conversations."
# Output JSON with additional context for injection
cat << EOF
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit",
"additionalContext": $(echo "$RECOVERY_CONTEXT" | jq -Rs .)
}
}
EOF
# Delete marker file (one-time injection)
rm -f "$MARKER_FILE"
exit 0
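
The recovery hook only fires when a marker exists for the session, so the injection can be exercised end to end by planting one by hand. A sketch assuming the default MEMORY_HOME and an illustrative session ID:

import json
from pathlib import Path

marker_dir = Path.home() / ".claude" / "claude-memory" / "state" / "compaction-markers"
marker_dir.mkdir(parents=True, exist_ok=True)
marker = {
    "sessionId": "local-test",
    "compactedAt": "2026-03-15T11:00:00Z",
    "rememberedFacts": [
        {"content": "User prefers rebase over merge commits",
         "category": "preferences", "importance": 0.9},
    ],
}
(marker_dir / "local-test.json").write_text(json.dumps(marker))
# The next UserPromptSubmit for session "local-test" injects the recovery
# context, then the hook deletes the marker (one-time injection).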


@@ -0,0 +1,43 @@
#!/bin/bash
# PreCompact hook: Save key memories before compaction
set -e
INPUT=$(cat)
SESSION_ID=$(echo "$INPUT" | jq -r '.session_id // .sessionId // "unknown"')
MEMORY_HOME="${MEMORY_HOME:-$HOME/.claude/claude-memory}"
MARKER_DIR="${MEMORY_HOME}/state/compaction-markers"
MEMORY_DB="${MEMORY_HOME}/memory/memory.db"
MARKER_FILE="${MARKER_DIR}/${SESSION_ID}.json"
mkdir -p "$MARKER_DIR"
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
# Try API first, fall back to SQLite
REMEMBERED_FACTS="[]"
if [ -n "${MEMORY_API_KEY:-${CLAUDE_MEMORY_API_KEY:-}}" ]; then
API_KEY="${MEMORY_API_KEY:-${CLAUDE_MEMORY_API_KEY:-}}"
API_URL="${MEMORY_API_URL:-${CLAUDE_MEMORY_API_URL:-}}"
if [ -n "$API_URL" ]; then
REMEMBERED_FACTS=$(curl -sf -H "Authorization: Bearer ${API_KEY}" \
"${API_URL}/api/memories?limit=20" 2>/dev/null | \
jq '[.memories[] | {content, category, importance}]' 2>/dev/null || echo "[]")
fi
elif [ -f "$MEMORY_DB" ]; then
REMEMBERED_FACTS=$(sqlite3 -json "$MEMORY_DB" \
"SELECT content, category, importance FROM memories ORDER BY importance DESC, created_at DESC LIMIT 20" 2>/dev/null || echo "[]")
fi
if ! echo "$REMEMBERED_FACTS" | jq empty 2>/dev/null; then
REMEMBERED_FACTS="[]"
fi
jq -n \
--arg sid "$SESSION_ID" \
--arg ts "$TIMESTAMP" \
--argjson facts "$REMEMBERED_FACTS" \
'{sessionId: $sid, compactedAt: $ts, rememberedFacts: $facts}' \
> "$MARKER_FILE"
exit 0
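
To preview what the SQLite fallback will export, the same query can be run from Python (the db path assumes the default MEMORY_HOME):

import json
import sqlite3
from pathlib import Path

db = Path.home() / ".claude" / "claude-memory" / "memory" / "memory.db"
conn = sqlite3.connect(str(db))
rows = conn.execute(
    "SELECT content, category, importance FROM memories "
    "ORDER BY importance DESC, created_at DESC LIMIT 20"
).fetchall()
conn.close()
# Same shape the hook stores under rememberedFacts in the marker file.
print(json.dumps(
    [{"content": c, "category": cat, "importance": imp} for c, cat, imp in rows],
    indent=2,
))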


@@ -0,0 +1,88 @@
#!/usr/bin/env python3
"""
UserPromptSubmit hook: nudge Claude to check memories before responding.
Instead of searching SQLite directly (which can't generate expanded_query),
this hook injects a prompt instruction telling Claude to call memory_recall
with proper expanded_query. The model handles the semantic expansion.
Skips injection for short prompts, code blocks, and tool outputs.
"""
import json
import os
import sys
RECALL_INSTRUCTION = (
"MANDATORY: Call memory_recall with context and expanded_query BEFORE responding "
"to this message. This is a BLOCKING REQUIREMENT — do NOT answer, summarize, or "
"take action until you have checked stored memories. The user's preferences, past "
"corrections, and decisions are stored there and MUST inform your response.\n\n"
"ONLY EXCEPTION: Skip memory_recall if the message is a direct technical command "
"with zero personal context (e.g., 'fix this lint error', 'run tests', 'read file X'). "
"Questions, discussions, opinions, design tasks, and anything involving preferences "
"are NOT exceptions — check memory for those.\n\n"
"NEVER mention the memory check to the user — not when you call it, not when you skip it, "
"not when it returns nothing. It should be completely invisible. "
"If it returns results, incorporate them naturally into your response."
)
def main() -> None:
# Read hook input from stdin
try:
hook_input = json.load(sys.stdin)
except (json.JSONDecodeError, EOFError):
return
# Extract user's prompt
prompt = ""
if isinstance(hook_input, dict):
prompt = hook_input.get("prompt", "")
if not prompt:
prompt = hook_input.get("user_prompt", "")
if not prompt:
content = hook_input.get("content", "")
if isinstance(content, str):
prompt = content
if not prompt or len(prompt.strip()) < 10:
return # Too short to warrant memory check
# Skip obviously irrelevant prompts
stripped = prompt.strip()
if (
stripped.startswith("```")
or stripped.startswith("{")
or stripped.startswith("<")
):
return
# Skip if memory DB doesn't exist (no memories to recall)
memory_home = os.environ.get(
"MEMORY_HOME", os.path.expanduser("~/.claude/claude-memory")
)
db_path = os.path.join(memory_home, "memory", "memory.db")
# Also check legacy path for migration
legacy_home = os.path.expanduser("~/.claude/metaclaw")
legacy_db = os.path.join(legacy_home, "memory", "memory.db")
if not os.path.exists(db_path) and not os.path.exists(legacy_db):
return
# Inject the recall instruction
output = json.dumps(
{
"hookSpecificOutput": {
"hookEventName": "UserPromptSubmit",
"additionalContext": RECALL_INSTRUCTION,
}
}
)
print(output)
if __name__ == "__main__":
main()
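
The skip heuristics are easy to verify by hand: prompts under 10 characters, or ones opening with a code fence, JSON brace, or angle bracket, produce no output. A sketch (hook path hypothetical; the memory DB must already exist or every case is skipped):

import json
import subprocess
from pathlib import Path

hook = Path.home() / ".claude" / "hooks" / "user-prompt-recall.py"
for prompt in [
    "How do I usually like commit messages formatted?",  # should inject
    "hi",                                                # too short: skipped
    '{"tool_result": "ok"}',                             # looks like tool output: skipped
]:
    out = subprocess.run(
        ["python3", str(hook)],
        input=json.dumps({"prompt": prompt}),
        capture_output=True,
        text=True,
    ).stdout
    print(repr(prompt[:30]), "->", "injected" if out.strip() else "skipped")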


@@ -50,7 +50,7 @@
"hooks": [
{
"type": "command",
"command": "python3 /Users/viktorbarzin/code/claude-memory-mcp/hooks/auto-allow-memory-tools.py",
"command": "python3 /Users/viktorbarzin/.claude/hooks/auto-allow-memory-tools.py",
"timeout": 3
}
]
@@ -61,7 +61,7 @@
"hooks": [
{
"type": "command",
"command": "/Users/viktorbarzin/code/claude-memory-mcp/hooks/pre-compact-backup.sh",
"command": "/Users/viktorbarzin/.claude/hooks/pre-compact-backup.sh",
"timeout": 30
}
]
@@ -82,7 +82,7 @@
"hooks": [
{
"type": "command",
"command": "python3 /Users/viktorbarzin/code/claude-memory-mcp/hooks/auto-learn.py",
"command": "python3 /Users/viktorbarzin/.claude/hooks/auto-learn.py",
"async": true
}
]
@@ -93,7 +93,7 @@
"hooks": [
{
"type": "command",
"command": "/Users/viktorbarzin/code/claude-memory-mcp/hooks/post-compact-recovery.sh",
"command": "/Users/viktorbarzin/.claude/hooks/post-compact-recovery.sh",
"timeout": 10
}
]
@@ -102,7 +102,7 @@
"hooks": [
{
"type": "command",
"command": "python3 /Users/viktorbarzin/code/claude-memory-mcp/hooks/user-prompt-recall.py",
"command": "python3 /Users/viktorbarzin/.claude/hooks/user-prompt-recall.py",
"timeout": 5
}
]
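
These path updates assume the hook scripts have been copied out of the repo checkout into ~/.claude/hooks, which is the point of this commit (sandboxed sessions cannot reach the checkout). A minimal sync sketch, run from the repo root:

import shutil
from pathlib import Path

src = Path("hooks")  # this repo's hooks directory
dst = Path.home() / ".claude" / "hooks"
dst.mkdir(parents=True, exist_ok=True)
for f in src.iterdir():
    if f.suffix in (".py", ".sh"):
        shutil.copy2(f, dst / f.name)  # copy2 preserves the executable bit
        print(f"installed {f.name}")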