From 94330755d8e40abdd332841cd6fb341567897d38 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 10:55:21 +0000 Subject: [PATCH 01/10] feat: add streamable-http MCP transport alongside SSE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SSE transport has reliability issues through Cloudflare/Traefik proxies (connections drop, causing init failures on reconnect). Streamable HTTP is stateless — each request carries its own session, avoiding persistent connection issues. New endpoint: POST/GET/DELETE /mcp/mcp (streamable-http) Existing: GET /mcp/sse + POST /mcp/messages/ (SSE, unchanged) Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 49 +++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index c912dd2..69096da 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -15,6 +15,7 @@ from fastapi.responses import Response from fastapi.staticfiles import StaticFiles from mcp.server.fastmcp import FastMCP from mcp.server.sse import SseServerTransport +from mcp.server.streamable_http import StreamableHTTPServerTransport from starlette.routing import Mount, Route from starlette.types import ASGIApp, Receive, Scope, Send @@ -1296,11 +1297,57 @@ class HandleSSE: ) +# Streamable HTTP transport — stateless, no persistent SSE connection needed. +# Each request carries its own init+tool call. More reliable through proxies. 
+class HandleStreamableHTTP: + """ASGI app for streamable-http MCP connections.""" + def __init__(self) -> None: + self._transport: StreamableHTTPServerTransport | None = None + + async def __call__(self, scope: Any, receive: Any, send: Any) -> None: + user_id = "default" + for name, value in scope.get("headers", []): + if name == b"authorization": + token = value.decode().removeprefix("Bearer ").strip() + resolved = _resolve_user_from_token(token) + if resolved: + user_id = resolved + break + _current_user.set(user_id) + + session_id = None + for name, value in scope.get("headers", []): + if name == b"mcp-session-id": + session_id = value.decode() + break + + transport = StreamableHTTPServerTransport( + mcp_session_id=session_id, + is_json_response_enabled=True, + ) + async with transport.connect() as (read_stream, write_stream): + import anyio + async with anyio.create_task_group() as tg: + async def run_server() -> None: + await mcp_server._mcp_server.run( + read_stream, write_stream, + mcp_server._mcp_server.create_initialization_options(), + ) + + tg.start_soon(run_server) + await transport.handle_request(scope, receive, send) + tg.cancel_scope.cancel() + + +streamable_handler = HandleStreamableHTTP() + # Static files for UI (before MCP mount) app.mount("/static", StaticFiles(directory=UI_DIR), name="static") -# Client connects to /mcp/sse, posts to /mcp/messages/ +# Client connects to /mcp/sse, posts to /mcp/messages/ (SSE transport) +# Client can also POST to /mcp/mcp (streamable-http transport) app.router.routes.insert(0, Mount("/mcp", routes=[ Route("/sse", endpoint=HandleSSE()), Mount("/messages", app=sse_transport.handle_post_message), + Route("/mcp", endpoint=streamable_handler, methods=["GET", "POST", "DELETE"]), ])) From 03681aae4930515d7a827e110d39ce64a024aad4 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 12:27:44 +0000 Subject: [PATCH 02/10] fix: use StreamableHTTPSessionManager in stateless mode MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous per-request transport implementation couldn't maintain session state. Using the SDK's built-in SessionManager with stateless=True means sessions start pre-initialized — tool calls work immediately without the init handshake, avoiding the reconnection race condition. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 44 ++++++++++++------------------------ 1 file changed, 14 insertions(+), 30 deletions(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index 69096da..f1fdc15 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -15,7 +15,7 @@ from fastapi.responses import Response from fastapi.staticfiles import StaticFiles from mcp.server.fastmcp import FastMCP from mcp.server.sse import SseServerTransport -from mcp.server.streamable_http import StreamableHTTPServerTransport +from mcp.server.streamable_http_manager import StreamableHTTPSessionManager from starlette.routing import Mount, Route from starlette.types import ASGIApp, Receive, Scope, Send @@ -42,7 +42,8 @@ _current_user: ContextVar[str] = ContextVar("_current_user", default="default") @asynccontextmanager async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: await init_pool() - yield + async with streamable_session_mgr.run(): + yield await close_pool() @@ -1297,12 +1298,17 @@ class HandleSSE: ) -# Streamable HTTP transport — stateless, no persistent SSE connection needed. -# Each request carries its own init+tool call. More reliable through proxies. +# Streamable HTTP transport — session manager handles lifecycle automatically. +# More reliable through proxies than SSE since responses come in HTTP body. 
+streamable_session_mgr = StreamableHTTPSessionManager( + app=mcp_server._mcp_server, + json_response=True, + stateless=True, +) + + class HandleStreamableHTTP: - """ASGI app for streamable-http MCP connections.""" - def __init__(self) -> None: - self._transport: StreamableHTTPServerTransport | None = None + """ASGI wrapper that sets _current_user before delegating to the session manager.""" async def __call__(self, scope: Any, receive: Any, send: Any) -> None: user_id = "default" @@ -1314,29 +1320,7 @@ class HandleStreamableHTTP: user_id = resolved break _current_user.set(user_id) - - session_id = None - for name, value in scope.get("headers", []): - if name == b"mcp-session-id": - session_id = value.decode() - break - - transport = StreamableHTTPServerTransport( - mcp_session_id=session_id, - is_json_response_enabled=True, - ) - async with transport.connect() as (read_stream, write_stream): - import anyio - async with anyio.create_task_group() as tg: - async def run_server() -> None: - await mcp_server._mcp_server.run( - read_stream, write_stream, - mcp_server._mcp_server.create_initialization_options(), - ) - - tg.start_soon(run_server) - await transport.handle_request(scope, receive, send) - tg.cancel_scope.cancel() + await streamable_session_mgr.handle_request(scope, receive, send) streamable_handler = HandleStreamableHTTP() From 43a5513f6c3d89896586c456bac61cd9c7f8490c Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 13:27:58 +0000 Subject: [PATCH 03/10] feat: make all memories public by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All memories are now visible to all users in recall/list/count queries. Each memory still has an owner (user_id) who retains exclusive delete rights. This removes the need for explicit sharing — wizard and emo automatically see each other's memories. 
Changes: - recall/list: single query without user_id filter, added owner field - count: counts all memories globally - REST categories/tags: show all users' data - Delete/update: unchanged (owner-only or write-share) - Sync: unchanged (stays user-scoped) Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 219 ++++++----------------------------- tests/test_api.py | 54 ++++----- 2 files changed, 58 insertions(+), 215 deletions(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index f1fdc15..305c46d 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -213,16 +213,15 @@ async def recall_memories(body: MemoryRecall, user: AuthUser = Depends(get_curre params.append(body.category) async with pool.acquire() as conn: - # Own memories (AND-match) + # All memories (public by default) — AND-match rows = await conn.fetch( f""" SELECT id, content, category, tags, importance, is_sensitive, ts_rank(search_vector, query) AS rank, - created_at, updated_at, - NULL::text AS shared_by, NULL::text AS share_permission + created_at, updated_at, user_id AS owner, + CASE WHEN user_id = $1 THEN NULL ELSE user_id END AS shared_by FROM memories, plainto_tsquery('english', $2) query - WHERE user_id = $1 - AND deleted_at IS NULL + WHERE deleted_at IS NULL AND (search_vector @@ query OR $2 = '') {category_filter} ORDER BY {order_clause} @@ -231,64 +230,9 @@ async def recall_memories(body: MemoryRecall, user: AuthUser = Depends(get_curre *params, ) - # Individually shared memories - shared_rows = await conn.fetch( - f""" - SELECT m.id, m.content, m.category, m.tags, m.importance, m.is_sensitive, - ts_rank(m.search_vector, query) AS rank, - m.created_at, m.updated_at, - m.user_id AS shared_by, ms.permission AS share_permission - FROM memories m - JOIN memory_shares ms ON ms.memory_id = m.id, - plainto_tsquery('english', $2) query - WHERE ms.shared_with = $1 - AND m.deleted_at IS NULL - AND (m.search_vector @@ query OR 
$2 = '') - {category_filter} - ORDER BY {order_clause} - LIMIT $3 - """, - *params, - ) + all_rows = list(rows) - # Tag-shared memories - tag_shared_rows = await conn.fetch( - f""" - SELECT DISTINCT ON (m.id) - m.id, m.content, m.category, m.tags, m.importance, m.is_sensitive, - ts_rank(m.search_vector, query) AS rank, - m.created_at, m.updated_at, - m.user_id AS shared_by, ts.permission AS share_permission - FROM memories m - JOIN tag_shares ts ON ts.owner_id = m.user_id, - plainto_tsquery('english', $2) query - WHERE ts.shared_with = $1 - AND m.deleted_at IS NULL - AND (m.search_vector @@ query OR $2 = '') - AND EXISTS ( - SELECT 1 FROM unnest(string_to_array(m.tags, ',')) t - WHERE trim(t) = ts.tag - ) - {category_filter} - ORDER BY m.id - LIMIT $3 - """, - *params, - ) - - # Merge and deduplicate - seen_ids: set[int] = set() - all_rows = [] - for row in list(rows) + list(shared_rows) + list(tag_shared_rows): - if row["id"] not in seen_ids: - seen_ids.add(row["id"]) - all_rows.append(row) - - # Sort merged results by importance desc and trim - all_rows.sort(key=lambda r: r["importance"], reverse=True) - all_rows = all_rows[:body.limit] - - # If AND-match returned too few results, broaden to OR-match (own memories only) + # If AND-match returned too few results, broaden to OR-match if len(all_rows) < body.limit and query_text: words = query_text.split() if len(words) > 1: @@ -298,15 +242,15 @@ async def recall_memories(body: MemoryRecall, user: AuthUser = Depends(get_curre if body.category: or_cat_filter = "AND category = $4" or_params.append(body.category) + seen_ids = {r["id"] for r in all_rows} or_rows = await conn.fetch( f""" SELECT id, content, category, tags, importance, is_sensitive, ts_rank(search_vector, query) AS rank, - created_at, updated_at, - NULL::text AS shared_by, NULL::text AS share_permission + created_at, updated_at, user_id AS owner, + CASE WHEN user_id = $1 THEN NULL ELSE user_id END AS shared_by FROM memories, to_tsquery('english', $2) 
query - WHERE user_id = $1 - AND deleted_at IS NULL + WHERE deleted_at IS NULL AND search_vector @@ query {or_cat_filter} ORDER BY {order_clause} @@ -331,10 +275,10 @@ async def recall_memories(body: MemoryRecall, user: AuthUser = Depends(get_curre "importance": row["importance"], "is_sensitive": row["is_sensitive"], "rank": float(row["rank"]), + "owner": row["owner"], "created_at": row["created_at"].isoformat(), "updated_at": row["updated_at"].isoformat(), "shared_by": row["shared_by"], - "share_permission": row["share_permission"], } ) @@ -351,10 +295,10 @@ async def list_memories( ) -> dict[str, Any]: pool = await get_pool() - # Build WHERE clauses dynamically - where_clauses = ["user_id = $1", "deleted_at IS NULL"] - count_params: list[Any] = [user.user_id] - param_idx = 2 + # Build WHERE clauses dynamically — all memories are public + where_clauses = ["deleted_at IS NULL"] + count_params: list[Any] = [] + param_idx = 1 if category: where_clauses.append(f"category = ${param_idx}") @@ -373,7 +317,7 @@ async def list_memories( params: list[Any] = [*count_params, limit, offset] query = f""" - SELECT id, content, category, tags, importance, is_sensitive, created_at, updated_at + SELECT id, content, category, tags, importance, is_sensitive, created_at, updated_at, user_id AS owner FROM memories WHERE {where} ORDER BY importance DESC LIMIT ${param_idx} OFFSET ${param_idx + 1} """ @@ -395,6 +339,7 @@ async def list_memories( "tags": row["tags"], "importance": row["importance"], "is_sensitive": row["is_sensitive"], + "owner": row["owner"], "created_at": row["created_at"].isoformat(), "updated_at": row["updated_at"].isoformat(), } @@ -405,30 +350,28 @@ async def list_memories( @app.get("/api/categories") async def list_categories(user: AuthUser = Depends(get_current_user)) -> dict[str, Any]: - """Return distinct category values for the current user.""" + """Return distinct category values across all users.""" pool = await get_pool() async with pool.acquire() as conn: 
rows = await conn.fetch( - "SELECT DISTINCT category FROM memories WHERE user_id = $1 AND deleted_at IS NULL ORDER BY category", - user.user_id, + "SELECT DISTINCT category FROM memories WHERE deleted_at IS NULL ORDER BY category", ) return {"categories": [r["category"] for r in rows]} @app.get("/api/tags") async def list_tags(user: AuthUser = Depends(get_current_user)) -> dict[str, Any]: - """Return all distinct tags with memory counts for the current user.""" + """Return all distinct tags with memory counts across all users.""" pool = await get_pool() async with pool.acquire() as conn: rows = await conn.fetch( """ SELECT trim(t) as tag, COUNT(*) as count FROM memories, unnest(string_to_array(tags, ',')) AS t - WHERE user_id = $1 AND deleted_at IS NULL AND tags != '' AND tags IS NOT NULL + WHERE deleted_at IS NULL AND tags != '' AND tags IS NOT NULL GROUP BY trim(t) ORDER BY count DESC """, - user.user_id, ) return {"tags": [{"tag": r["tag"], "count": r["count"]} for r in rows]} @@ -946,9 +889,10 @@ async def memory_recall(context: str, expanded_query: str = "", f""" SELECT id, content, category, tags, importance, is_sensitive, ts_rank(search_vector, query) AS rank, created_at, updated_at, - NULL::text AS shared_by + user_id AS owner, + CASE WHEN user_id = $1 THEN NULL ELSE user_id END AS shared_by FROM memories, plainto_tsquery('english', $2) query - WHERE user_id = $1 AND deleted_at IS NULL + WHERE deleted_at IS NULL AND (search_vector @@ query OR $2 = '') {category_filter} ORDER BY {order_clause} @@ -957,34 +901,8 @@ async def memory_recall(context: str, expanded_query: str = "", *params, ) - # Also fetch shared memories (individual + tag-based) - shared_rows = await conn.fetch( - """ - SELECT DISTINCT ON (m.id) m.id, m.content, m.category, m.tags, m.importance, - m.is_sensitive, ts_rank(m.search_vector, query) AS rank, - m.created_at, m.updated_at, m.user_id AS shared_by - FROM memories m, plainto_tsquery('english', $2) query - WHERE m.deleted_at IS NULL - AND 
(m.search_vector @@ query OR $2 = '') - AND m.user_id != $1 - AND ( - EXISTS (SELECT 1 FROM memory_shares ms WHERE ms.memory_id = m.id AND ms.shared_with = $1) - OR EXISTS ( - SELECT 1 FROM tag_shares ts - WHERE ts.owner_id = m.user_id AND ts.shared_with = $1 - AND EXISTS (SELECT 1 FROM unnest(string_to_array(m.tags, ',')) t WHERE trim(t) = ts.tag) - ) - ) - ORDER BY m.id - LIMIT $3 - """, - *params, - ) - - seen_ids = set() results = [] for row in rows: - seen_ids.add(row["id"]) c = row["content"] if row["is_sensitive"]: c = f"[SENSITIVE - use secret_get(id={row['id']})]" @@ -992,27 +910,14 @@ async def memory_recall(context: str, expanded_query: str = "", "id": row["id"], "content": c, "category": row["category"], "tags": row["tags"], "importance": row["importance"], "rank": float(row["rank"]), + "owner": row["owner"], "created_at": row["created_at"].isoformat(), "updated_at": row["updated_at"].isoformat(), } + if row["shared_by"]: + entry["shared_by"] = row["shared_by"] results.append(entry) - for row in shared_rows: - if row["id"] in seen_ids: - continue - seen_ids.add(row["id"]) - c = row["content"] - if row["is_sensitive"]: - c = f"[SENSITIVE - use secret_get(id={row['id']})]" - results.append({ - "id": row["id"], "content": c, "category": row["category"], - "tags": row["tags"], "importance": row["importance"], - "rank": float(row["rank"]), - "shared_by": row["shared_by"], - "created_at": row["created_at"].isoformat(), - "updated_at": row["updated_at"].isoformat(), - }) - return json.dumps({"memories": results}) @@ -1020,81 +925,30 @@ async def memory_recall(context: str, expanded_query: str = "", async def memory_list(category: str | None = None, limit: int = 20) -> str: """List stored memories.""" pool = await get_pool() - user_id = _current_user.get() if category: - query = """SELECT id, content, category, tags, importance, is_sensitive, created_at, updated_at - FROM memories WHERE user_id = $1 AND deleted_at IS NULL AND category = $2 - ORDER BY importance 
DESC LIMIT $3""" - params: list[Any] = [user_id, category, limit] - else: - query = """SELECT id, content, category, tags, importance, is_sensitive, created_at, updated_at - FROM memories WHERE user_id = $1 AND deleted_at IS NULL + query = """SELECT id, content, category, tags, importance, is_sensitive, created_at, updated_at, user_id AS owner + FROM memories WHERE deleted_at IS NULL AND category = $1 ORDER BY importance DESC LIMIT $2""" - params = [user_id, limit] - - if category: - shared_query = """ - SELECT DISTINCT ON (m.id) m.id, m.content, m.category, m.tags, m.importance, - m.is_sensitive, m.created_at, m.updated_at, m.user_id AS shared_by - FROM memories m - WHERE m.deleted_at IS NULL AND m.category = $2 AND m.user_id != $1 - AND ( - EXISTS (SELECT 1 FROM memory_shares ms WHERE ms.memory_id = m.id AND ms.shared_with = $1) - OR EXISTS ( - SELECT 1 FROM tag_shares ts - WHERE ts.owner_id = m.user_id AND ts.shared_with = $1 - AND EXISTS (SELECT 1 FROM unnest(string_to_array(m.tags, ',')) t WHERE trim(t) = ts.tag) - ) - ) - ORDER BY m.id LIMIT $3""" - shared_params: list[Any] = [user_id, category, limit] + params: list[Any] = [category, limit] else: - shared_query = """ - SELECT DISTINCT ON (m.id) m.id, m.content, m.category, m.tags, m.importance, - m.is_sensitive, m.created_at, m.updated_at, m.user_id AS shared_by - FROM memories m - WHERE m.deleted_at IS NULL AND m.user_id != $1 - AND ( - EXISTS (SELECT 1 FROM memory_shares ms WHERE ms.memory_id = m.id AND ms.shared_with = $1) - OR EXISTS ( - SELECT 1 FROM tag_shares ts - WHERE ts.owner_id = m.user_id AND ts.shared_with = $1 - AND EXISTS (SELECT 1 FROM unnest(string_to_array(m.tags, ',')) t WHERE trim(t) = ts.tag) - ) - ) - ORDER BY m.id LIMIT $2""" - shared_params = [user_id, limit] + query = """SELECT id, content, category, tags, importance, is_sensitive, created_at, updated_at, user_id AS owner + FROM memories WHERE deleted_at IS NULL + ORDER BY importance DESC LIMIT $1""" + params = [limit] async with 
pool.acquire() as conn: rows = await conn.fetch(query, *params) - shared_rows = await conn.fetch(shared_query, *shared_params) - seen_ids = set() results = [] for row in rows: - seen_ids.add(row["id"]) c = row["content"] if row["is_sensitive"]: c = f"[SENSITIVE - use secret_get(id={row['id']})]" results.append({ "id": row["id"], "content": c, "category": row["category"], "tags": row["tags"], "importance": row["importance"], - "created_at": row["created_at"].isoformat(), - "updated_at": row["updated_at"].isoformat(), - }) - - for row in shared_rows: - if row["id"] in seen_ids: - continue - seen_ids.add(row["id"]) - c = row["content"] - if row["is_sensitive"]: - c = f"[SENSITIVE - use secret_get(id={row['id']})]" - results.append({ - "id": row["id"], "content": c, "category": row["category"], - "tags": row["tags"], "importance": row["importance"], - "shared_by": row["shared_by"], + "owner": row["owner"], "created_at": row["created_at"].isoformat(), "updated_at": row["updated_at"].isoformat(), }) @@ -1131,9 +985,8 @@ async def memory_delete(memory_id: int) -> str: async def memory_count() -> str: """Count total memories.""" pool = await get_pool() - user_id = _current_user.get() async with pool.acquire() as conn: - count = await conn.fetchval("SELECT COUNT(*) FROM memories WHERE user_id = $1 AND deleted_at IS NULL", user_id) + count = await conn.fetchval("SELECT COUNT(*) FROM memories WHERE deleted_at IS NULL") return json.dumps({"count": count}) diff --git a/tests/test_api.py b/tests/test_api.py index b79bb7d..d5e7765 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -37,6 +37,7 @@ def _make_memory_row(**overrides): "created_at": now, "updated_at": now, "deleted_at": None, + "owner": "testuser", "shared_by": None, "share_permission": None, } @@ -139,14 +140,11 @@ async def test_store_memory_creates_record_with_user_id(client): @pytest.mark.asyncio -async def test_recall_returns_only_user_memories(client): +async def test_recall_returns_all_memories(client): 
ac, conn, app_mod = client - # recall calls fetch 3 times: own, shared, tag-shared; plus OR-fallback if < limit - conn.fetch.side_effect = [ - [_make_memory_row(id=1, content="user memory", is_sensitive=False)], # own - [], # individually shared - [], # tag-shared - [], # OR-match fallback + # recall now runs a single query (all memories are public) + conn.fetch.return_value = [ + _make_memory_row(id=1, content="user memory", is_sensitive=False, owner="testuser", shared_by=None), ] async with ac: @@ -161,19 +159,14 @@ async def test_recall_returns_only_user_memories(client): results = data["memories"] assert len(results) == 1 assert results[0]["content"] == "user memory" - - # Verify query includes user_id filter - call_args = conn.fetch.call_args - assert call_args[0][1] == "testuser" + assert results[0]["owner"] == "testuser" @pytest.mark.asyncio async def test_recall_redacts_sensitive_memories(client): ac, conn, app_mod = client - conn.fetch.side_effect = [ - [_make_memory_row(id=5, content="[REDACTED]", is_sensitive=True)], # own - [], # individually shared - [], # tag-shared + conn.fetch.return_value = [ + _make_memory_row(id=5, content="[REDACTED]", is_sensitive=True, owner="testuser", shared_by=None), ] async with ac: @@ -191,11 +184,11 @@ async def test_recall_redacts_sensitive_memories(client): @pytest.mark.asyncio -async def test_list_returns_only_user_memories(client): +async def test_list_returns_all_memories(client): ac, conn, app_mod = client conn.fetch.return_value = [ - _make_memory_row(id=1, content="mem1"), - _make_memory_row(id=2, content="mem2"), + _make_memory_row(id=1, content="mem1", owner="testuser"), + _make_memory_row(id=2, content="mem2", owner="otheruser"), ] async with ac: @@ -208,10 +201,8 @@ async def test_list_returns_only_user_memories(client): data = resp.json() results = data["memories"] assert len(results) == 2 - - # Verify user_id filter - call_args = conn.fetch.call_args - assert call_args[0][1] == "testuser" + assert 
results[0]["owner"] == "testuser" + assert results[1]["owner"] == "otheruser" @pytest.mark.asyncio @@ -601,15 +592,14 @@ async def test_my_shares_returns_outgoing_shares(client): @pytest.mark.asyncio -async def test_recall_includes_shared_memories(client): - """POST /api/memories/recall includes shared memories with shared_by field.""" +async def test_recall_includes_all_users_memories(client): + """POST /api/memories/recall returns all users' memories with owner field.""" ac, conn, app_mod = client - # recall calls fetch multiple times: own, shared, tag-shared, OR-fallback - conn.fetch.side_effect = [ - [_make_memory_row(id=1, content="own memory", user_id="testuser", shared_by=None)], # own - [_make_memory_row(id=2, content="shared memory", user_id="owner1", shared_by="owner1")], # shared - [_make_memory_row(id=3, content="tag shared", user_id="owner2", shared_by="owner2")], # tag-shared - [], # OR-fallback + # Single query returns all memories (public by default) + conn.fetch.return_value = [ + _make_memory_row(id=1, content="own memory", owner="testuser", shared_by=None), + _make_memory_row(id=2, content="other memory", owner="owner1", shared_by="owner1"), + _make_memory_row(id=3, content="another memory", owner="owner2", shared_by="owner2"), ] async with ac: @@ -623,10 +613,10 @@ async def test_recall_includes_shared_memories(client): data = resp.json() results = data["memories"] assert len(results) == 3 - # Check that shared_by field appears in shared memories + assert results[0]["owner"] == "testuser" assert results[0]["shared_by"] is None + assert results[1]["owner"] == "owner1" assert results[1]["shared_by"] == "owner1" - assert results[2]["shared_by"] == "owner2" @pytest.mark.asyncio From d03a77ac36d6841f78198325f28d328928fb178d Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 13:56:12 +0000 Subject: [PATCH 04/10] feat: raise default query limits to 10000 (effectively unlimited) With 375 memories and 1M context window, low limits just 
hide results. Agents can still pass a smaller limit when they want fewer results. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 6 +++--- src/claude_memory/api/models.py | 2 +- tests/test_properties.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index 305c46d..4c70337 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -289,7 +289,7 @@ async def recall_memories(body: MemoryRecall, user: AuthUser = Depends(get_curre async def list_memories( category: Optional[str] = None, tag: Optional[str] = None, - limit: int = 50, + limit: int = 10000, offset: int = 0, user: AuthUser = Depends(get_current_user), ) -> dict[str, Any]: @@ -862,7 +862,7 @@ async def memory_store(content: str, category: str = "facts", tags: str = "", @mcp_server.tool() async def memory_recall(context: str, expanded_query: str = "", category: str | None = None, sort_by: str = "importance", - limit: int = 10) -> str: + limit: int = 10000) -> str: """Recall memories by semantic search.""" pool = await get_pool() user_id = _current_user.get() @@ -922,7 +922,7 @@ async def memory_recall(context: str, expanded_query: str = "", @mcp_server.tool() -async def memory_list(category: str | None = None, limit: int = 20) -> str: +async def memory_list(category: str | None = None, limit: int = 10000) -> str: """List stored memories.""" pool = await get_pool() diff --git a/src/claude_memory/api/models.py b/src/claude_memory/api/models.py index 61b9704..beddff9 100644 --- a/src/claude_memory/api/models.py +++ b/src/claude_memory/api/models.py @@ -20,7 +20,7 @@ class MemoryRecall(BaseModel): expanded_query: str = "" category: Optional[str] = None sort_by: Literal["importance", "relevance", "recency"] = "importance" - limit: int = Field(default=10, ge=1, le=500) + limit: int = Field(default=10000, ge=1, le=10000) class MemoryResponse(BaseModel): diff --git 
a/tests/test_properties.py b/tests/test_properties.py index 38390b2..b4fb49e 100644 --- a/tests/test_properties.py +++ b/tests/test_properties.py @@ -89,10 +89,10 @@ def test_invalid_sort_by_rejected(sort_by): pass -@given(limit=st.integers(min_value=501, max_value=10000)) +@given(limit=st.integers(min_value=10001, max_value=50000)) @settings(max_examples=10) def test_limit_too_high_rejected(limit): - """Limit above 500 is rejected after model update.""" + """Limit above 10000 is rejected after model update.""" try: MemoryRecall(context="test", limit=limit) assert False, "Should have raised ValidationError" From c88dd03cce72553a9b4cba11ebe05af0d5e6aa33 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 17:59:19 +0000 Subject: [PATCH 05/10] fix: add OR-fallback to MCP memory_recall tool The REST API /api/memories/recall already broadens to OR-match when the strict AND full-text search returns too few results, but the MCP tool handler was missing this fallback. Broad queries (many terms) produced empty results because plainto_tsquery ANDs all terms and no single memory contains every word. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index 4c70337..b3fff97 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -901,8 +901,39 @@ async def memory_recall(context: str, expanded_query: str = "", *params, ) + all_rows = list(rows) + + # If AND-match returned too few results, broaden to OR-match + if len(all_rows) < limit and query_text: + words = query_text.split() + if len(words) > 1: + or_tsquery = " | ".join(w for w in words if w) + or_params: list[Any] = [user_id, or_tsquery, limit] + or_cat_filter = "" + if category: + or_cat_filter = "AND category = $4" + or_params.append(category) + seen_ids = {r["id"] for r in all_rows} + or_rows = await conn.fetch( + f""" + SELECT id, content, category, tags, importance, is_sensitive, + ts_rank(search_vector, query) AS rank, created_at, updated_at, + user_id AS owner, + CASE WHEN user_id = $1 THEN NULL ELSE user_id END AS shared_by + FROM memories, to_tsquery('english', $2) query + WHERE deleted_at IS NULL + AND search_vector @@ query + {or_cat_filter} + ORDER BY {order_clause} + LIMIT $3 + """, + *or_params, + ) + all_rows = all_rows + [r for r in or_rows if r["id"] not in seen_ids] + all_rows = all_rows[:limit] + results = [] - for row in rows: + for row in all_rows: c = row["content"] if row["is_sensitive"]: c = f"[SENSITIVE - use secret_get(id={row['id']})]" From 73aefda82e6ddb2b3c3ae63769f2c3f5e48d2b62 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 18:19:52 +0000 Subject: [PATCH 06/10] feat: auto-split large memories at store time (>500 chars) When content exceeds 500 chars, it's automatically split into multiple memories on paragraph boundaries. Each chunk gets the same category, tags (with part-N-of-M suffix), keywords, and importance. 
Removes the old 800 char hard limit from the Pydantic model. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 63 +++++++++++++++++++++++++-------- src/claude_memory/api/models.py | 7 ++-- tests/test_properties.py | 13 +++---- 3 files changed, 56 insertions(+), 27 deletions(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index b3fff97..b119c1d 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -834,29 +834,64 @@ def _resolve_user_from_token(token: str) -> str | None: mcp_server = FastMCP("claude-memory") +MAX_MEMORY_CHARS = 500 + + +def _split_content(text: str, max_chars: int = MAX_MEMORY_CHARS) -> list[str]: + """Split text into chunks on paragraph boundaries, each <= max_chars.""" + if len(text) <= max_chars: + return [text] + paragraphs = text.split("\n\n") + chunks: list[str] = [] + current = "" + for para in paragraphs: + candidate = f"{current}\n\n{para}".strip() if current else para + if len(candidate) <= max_chars: + current = candidate + else: + if current: + chunks.append(current) + # If a single paragraph exceeds max_chars, hard-split it + while len(para) > max_chars: + chunks.append(para[:max_chars]) + para = para[max_chars:] + current = para + if current: + chunks.append(current) + return chunks + + @mcp_server.tool() async def memory_store(content: str, category: str = "facts", tags: str = "", expanded_keywords: str = "", importance: float = 0.5) -> str: - """Store a new memory.""" + """Store a new memory. 
Content over 500 chars is auto-split into multiple memories.""" pool = await get_pool() user_id = _current_user.get() - is_sensitive = _detect_sensitive(content) - stored_content = content if not is_sensitive else _redact_content(content) + chunks = _split_content(content) + created_ids = [] async with pool.acquire() as conn: - row = await conn.fetchrow( - """INSERT INTO memories (user_id, content, category, tags, expanded_keywords, importance, is_sensitive) - VALUES ($1, $2, $3, $4, $5, $6, $7) - RETURNING id""", - user_id, stored_content, category, tags, expanded_keywords, importance, is_sensitive, - ) - memory_id = row["id"] + for i, chunk in enumerate(chunks): + is_sensitive = _detect_sensitive(chunk) + stored = chunk if not is_sensitive else _redact_content(chunk) + chunk_tags = f"{tags},part-{i + 1}-of-{len(chunks)}" if len(chunks) > 1 else tags - if is_sensitive and is_vault_configured(): - vault_path = await store_secret(user_id, memory_id, content) - await conn.execute("UPDATE memories SET vault_path = $1 WHERE id = $2", vault_path, memory_id) + row = await conn.fetchrow( + """INSERT INTO memories (user_id, content, category, tags, expanded_keywords, importance, is_sensitive) + VALUES ($1, $2, $3, $4, $5, $6, $7) + RETURNING id""", + user_id, stored, category, chunk_tags, expanded_keywords, importance, is_sensitive, + ) + memory_id = row["id"] + created_ids.append(memory_id) - return json.dumps({"id": memory_id, "category": category, "importance": importance}) + if is_sensitive and is_vault_configured(): + vault_path = await store_secret(user_id, memory_id, chunk) + await conn.execute("UPDATE memories SET vault_path = $1 WHERE id = $2", vault_path, memory_id) + + if len(created_ids) == 1: + return json.dumps({"id": created_ids[0], "category": category, "importance": importance}) + return json.dumps({"ids": created_ids, "parts": len(created_ids), "category": category, "importance": importance}) @mcp_server.tool() diff --git a/src/claude_memory/api/models.py 
b/src/claude_memory/api/models.py index beddff9..d9678d0 100644 --- a/src/claude_memory/api/models.py +++ b/src/claude_memory/api/models.py @@ -3,11 +3,8 @@ from typing import Any, Literal, Optional from pydantic import BaseModel, Field -MAX_MEMORY_CHARS = 800 - - class MemoryStore(BaseModel): - content: str = Field(..., max_length=MAX_MEMORY_CHARS) + content: str category: str = "facts" tags: str = Field(default="", max_length=500) expanded_keywords: str = Field(default="", max_length=500) @@ -57,7 +54,7 @@ class UnshareTag(BaseModel): class MemoryUpdate(BaseModel): - content: Optional[str] = Field(None, max_length=MAX_MEMORY_CHARS) + content: Optional[str] = None tags: Optional[str] = None importance: Optional[float] = Field(None, ge=0.0, le=1.0) expanded_keywords: Optional[str] = None diff --git a/tests/test_properties.py b/tests/test_properties.py index b4fb49e..cf3f00d 100644 --- a/tests/test_properties.py +++ b/tests/test_properties.py @@ -29,15 +29,12 @@ def test_roundtrip_memory_store(mem): assert restored.tags == mem.tags -@given(content=st.text(min_size=801, max_size=1000)) +@given(content=st.text(min_size=801, max_size=2000)) @settings(max_examples=20) -def test_content_over_max_rejected(content): - """Content exceeding 800 chars is rejected.""" - try: - MemoryStore(content=content) - assert False, "Should have raised ValidationError" - except ValidationError: - pass +def test_content_over_500_accepted(content): + """Content over 500 chars is accepted by the model (auto-split happens server-side).""" + mem = MemoryStore(content=content) + assert len(mem.content) > 500 @given(importance=st.floats().filter(lambda x: x < 0.0 or x > 1.0).filter(lambda x: x == x)) # exclude NaN From 0c64cb05e6e675697915616bca681141e0b87ab6 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Wed, 8 Apr 2026 19:57:18 +0000 Subject: [PATCH 07/10] refactor: remove deprecated SSE transport, keep streamable-http only SSE is deprecated per MCP spec. 
Both clients (wizard, emo) already use type: "http" pointing to /mcp/mcp. Removes HandleSSE class, SseServerTransport, and /mcp/sse + /mcp/messages/ routes. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/claude_memory/api/app.py | 32 ++------------------------------ 1 file changed, 2 insertions(+), 30 deletions(-) diff --git a/src/claude_memory/api/app.py b/src/claude_memory/api/app.py index b119c1d..df4e44f 100644 --- a/src/claude_memory/api/app.py +++ b/src/claude_memory/api/app.py @@ -14,7 +14,6 @@ from fastapi import Depends, FastAPI, HTTPException from fastapi.responses import Response from fastapi.staticfiles import StaticFiles from mcp.server.fastmcp import FastMCP -from mcp.server.sse import SseServerTransport from mcp.server.streamable_http_manager import StreamableHTTPSessionManager from starlette.routing import Mount, Route from starlette.types import ASGIApp, Receive, Scope, Send @@ -1194,31 +1193,7 @@ class MCPAuthMiddleware: app.add_middleware(MCPAuthMiddleware) -# Mount SSE transport -sse_transport = SseServerTransport("/messages/") - - -class HandleSSE: - """ASGI app for SSE connections.""" - async def __call__(self, scope: Any, receive: Any, send: Any) -> None: - # Extract user from Authorization header for multi-user MCP - user_id = "default" - for name, value in scope.get("headers", []): - if name == b"authorization": - token = value.decode().removeprefix("Bearer ").strip() - resolved = _resolve_user_from_token(token) - if resolved: - user_id = resolved - break - _current_user.set(user_id) - async with sse_transport.connect_sse(scope, receive, send) as (read_stream, write_stream): - await mcp_server._mcp_server.run( - read_stream, write_stream, mcp_server._mcp_server.create_initialization_options() - ) - - -# Streamable HTTP transport — session manager handles lifecycle automatically. -# More reliable through proxies than SSE since responses come in HTTP body. +# Streamable HTTP transport — the only MCP transport (SSE is deprecated). 
streamable_session_mgr = StreamableHTTPSessionManager( app=mcp_server._mcp_server, json_response=True, @@ -1247,10 +1222,7 @@ streamable_handler = HandleStreamableHTTP() # Static files for UI (before MCP mount) app.mount("/static", StaticFiles(directory=UI_DIR), name="static") -# Client connects to /mcp/sse, posts to /mcp/messages/ (SSE transport) -# Client can also POST to /mcp/mcp (streamable-http transport) +# MCP streamable-http transport at /mcp/mcp app.router.routes.insert(0, Mount("/mcp", routes=[ - Route("/sse", endpoint=HandleSSE()), - Mount("/messages", app=sse_transport.handle_post_message), Route("/mcp", endpoint=streamable_handler, methods=["GET", "POST", "DELETE"]), ])) From 473f11a7669d081780b9a4bce6c8e918968cd21b Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Thu, 9 Apr 2026 23:20:55 +0000 Subject: [PATCH 08/10] bd init: initialize beads issue tracking --- .beads/.gitignore | 72 ++++++++++++++++++++++++++++ .beads/README.md | 81 +++++++++++++++++++++++++++++++ .beads/config.yaml | 54 +++++++++++++++++++++ .beads/hooks/post-checkout | 24 ++++++++++ .beads/hooks/post-merge | 24 ++++++++++ .beads/hooks/pre-commit | 24 ++++++++++ .beads/hooks/pre-push | 24 ++++++++++ .beads/hooks/prepare-commit-msg | 24 ++++++++++ .beads/metadata.json | 7 +++ .claude/settings.json | 26 ++++++++++ .gitignore | 4 ++ AGENTS.md | 84 +++++++++++++++++++++++++++++++++ CLAUDE.md | 69 +++++++++++++++++++++++++++ 13 files changed, 517 insertions(+) create mode 100644 .beads/.gitignore create mode 100644 .beads/README.md create mode 100644 .beads/config.yaml create mode 100755 .beads/hooks/post-checkout create mode 100755 .beads/hooks/post-merge create mode 100755 .beads/hooks/pre-commit create mode 100755 .beads/hooks/pre-push create mode 100755 .beads/hooks/prepare-commit-msg create mode 100644 .beads/metadata.json create mode 100644 .claude/settings.json create mode 100644 AGENTS.md create mode 100644 CLAUDE.md diff --git a/.beads/.gitignore b/.beads/.gitignore new file 
mode 100644 index 0000000..eb82c48 --- /dev/null +++ b/.beads/.gitignore @@ -0,0 +1,72 @@ +# Dolt database (managed by Dolt, not git) +dolt/ + +# Runtime files +bd.sock +bd.sock.startlock +sync-state.json +last-touched +.exclusive-lock + +# Daemon runtime (lock, log, pid) +daemon.* + +# Interactions log (runtime, not versioned) +interactions.jsonl + +# Push state (runtime, per-machine) +push-state.json + +# Lock files (various runtime locks) +*.lock + +# Credential key (encryption key for federation peer auth — never commit) +.beads-credential-key + +# Local version tracking (prevents upgrade notification spam after git ops) +.local_version + +# Worktree redirect file (contains relative path to main repo's .beads/) +# Must not be committed as paths would be wrong in other clones +redirect + +# Sync state (local-only, per-machine) +# These files are machine-specific and should not be shared across clones +.sync.lock +export-state/ +export-state.json + +# Ephemeral store (SQLite - wisps/molecules, intentionally not versioned) +ephemeral.sqlite3 +ephemeral.sqlite3-journal +ephemeral.sqlite3-wal +ephemeral.sqlite3-shm + +# Dolt server management (auto-started by bd) +dolt-server.pid +dolt-server.log +dolt-server.lock +dolt-server.port +dolt-server.activity + +# Corrupt backup directories (created by bd doctor --fix recovery) +*.corrupt.backup/ + +# Backup data (auto-exported JSONL, local-only) +backup/ + +# Per-project environment file (Dolt connection config, GH#2520) +.env + +# Legacy files (from pre-Dolt versions) +*.db +*.db?* +*.db-journal +*.db-wal +*.db-shm +db.sqlite +bd.db +# NOTE: Do NOT add negation patterns here. +# They would override fork protection in .git/info/exclude. +# Config files (metadata.json, config.yaml) are tracked by git by default +# since no pattern above ignores them. 
diff --git a/.beads/README.md b/.beads/README.md new file mode 100644 index 0000000..dbfe363 --- /dev/null +++ b/.beads/README.md @@ -0,0 +1,81 @@ +# Beads - AI-Native Issue Tracking + +Welcome to Beads! This repository uses **Beads** for issue tracking - a modern, AI-native tool designed to live directly in your codebase alongside your code. + +## What is Beads? + +Beads is issue tracking that lives in your repo, making it perfect for AI coding agents and developers who want their issues close to their code. No web UI required - everything works through the CLI and integrates seamlessly with git. + +**Learn more:** [github.com/steveyegge/beads](https://github.com/steveyegge/beads) + +## Quick Start + +### Essential Commands + +```bash +# Create new issues +bd create "Add user authentication" + +# View all issues +bd list + +# View issue details +bd show <id> + +# Update issue status +bd update <id> --claim +bd update <id> --status done + +# Sync with Dolt remote +bd dolt push +``` + +### Working with Issues + +Issues in Beads are: +- **Git-native**: Stored in Dolt database with version control and branching +- **AI-friendly**: CLI-first design works perfectly with AI coding agents +- **Branch-aware**: Issues can follow your branch workflow +- **Always in sync**: Auto-syncs with your commits + +## Why Beads?
+ +✨ **AI-Native Design** +- Built specifically for AI-assisted development workflows +- CLI-first interface works seamlessly with AI coding agents +- No context switching to web UIs + +🚀 **Developer Focused** +- Issues live in your repo, right next to your code +- Works offline, syncs when you push +- Fast, lightweight, and stays out of your way + +🔧 **Git Integration** +- Automatic sync with git commits +- Branch-aware issue tracking +- Dolt-native three-way merge resolution + +## Get Started with Beads + +Try Beads in your own projects: + +```bash +# Install Beads +curl -sSL https://raw.githubusercontent.com/steveyegge/beads/main/scripts/install.sh | bash + +# Initialize in your repo +bd init + +# Create your first issue +bd create "Try out Beads" +``` + +## Learn More + +- **Documentation**: [github.com/steveyegge/beads/docs](https://github.com/steveyegge/beads/tree/main/docs) +- **Quick Start Guide**: Run `bd quickstart` +- **Examples**: [github.com/steveyegge/beads/examples](https://github.com/steveyegge/beads/tree/main/examples) + +--- + +*Beads: Issue tracking that moves at the speed of thought* ⚡ diff --git a/.beads/config.yaml b/.beads/config.yaml new file mode 100644 index 0000000..232b151 --- /dev/null +++ b/.beads/config.yaml @@ -0,0 +1,54 @@ +# Beads Configuration File +# This file configures default behavior for all bd commands in this repository +# All settings can also be set via environment variables (BD_* prefix) +# or overridden with command-line flags + +# Issue prefix for this repository (used by bd init) +# If not set, bd init will auto-detect from directory name +# Example: issue-prefix: "myproject" creates issues like "myproject-1", "myproject-2", etc. 
+# issue-prefix: "" + +# Use no-db mode: JSONL-only, no Dolt database +# When true, bd will use .beads/issues.jsonl as the source of truth +# no-db: false + +# Enable JSON output by default +# json: false + +# Feedback title formatting for mutating commands (create/update/close/dep/edit) +# 0 = hide titles, N > 0 = truncate to N characters +# output: +# title-length: 255 + +# Default actor for audit trails (overridden by BEADS_ACTOR or --actor) +# actor: "" + +# Export events (audit trail) to .beads/events.jsonl on each flush/sync +# When enabled, new events are appended incrementally using a high-water mark. +# Use 'bd export --events' to trigger manually regardless of this setting. +# events-export: false + +# Multi-repo configuration (experimental - bd-307) +# Allows hydrating from multiple repositories and routing writes to the correct database +# repos: +# primary: "." # Primary repo (where this database lives) +# additional: # Additional repos to hydrate from (read-only) +# - ~/beads-planning # Personal planning repo +# - ~/work-planning # Work planning repo + +# JSONL backup (periodic export for off-machine recovery) +# Auto-enabled when a git remote exists. Override explicitly: +# backup: +# enabled: false # Disable auto-backup entirely +# interval: 15m # Minimum time between auto-exports +# git-push: false # Disable git push (export locally only) +# git-repo: "" # Separate git repo for backups (default: project repo) + +# Integration settings (access with 'bd config get/set') +# These are stored in the database, not in this file: +# - jira.url +# - jira.project +# - linear.url +# - linear.api-key +# - github.org +# - github.repo diff --git a/.beads/hooks/post-checkout b/.beads/hooks/post-checkout new file mode 100755 index 0000000..67ad327 --- /dev/null +++ b/.beads/hooks/post-checkout @@ -0,0 +1,24 @@ +#!/usr/bin/env sh +# --- BEGIN BEADS INTEGRATION v1.0.0 --- +# This section is managed by beads. Do not remove these markers. 
+if command -v bd >/dev/null 2>&1; then + export BD_GIT_HOOK=1 + _bd_timeout=${BEADS_HOOK_TIMEOUT:-300} + if command -v timeout >/dev/null 2>&1; then + timeout "$_bd_timeout" bd hooks run post-checkout "$@" + _bd_exit=$? + if [ $_bd_exit -eq 124 ]; then + echo >&2 "beads: hook 'post-checkout' timed out after ${_bd_timeout}s — continuing without beads" + _bd_exit=0 + fi + else + bd hooks run post-checkout "$@" + _bd_exit=$? + fi + if [ $_bd_exit -eq 3 ]; then + echo >&2 "beads: database not initialized — skipping hook 'post-checkout'" + _bd_exit=0 + fi + if [ $_bd_exit -ne 0 ]; then exit $_bd_exit; fi +fi +# --- END BEADS INTEGRATION v1.0.0 --- diff --git a/.beads/hooks/post-merge b/.beads/hooks/post-merge new file mode 100755 index 0000000..a731aec --- /dev/null +++ b/.beads/hooks/post-merge @@ -0,0 +1,24 @@ +#!/usr/bin/env sh +# --- BEGIN BEADS INTEGRATION v1.0.0 --- +# This section is managed by beads. Do not remove these markers. +if command -v bd >/dev/null 2>&1; then + export BD_GIT_HOOK=1 + _bd_timeout=${BEADS_HOOK_TIMEOUT:-300} + if command -v timeout >/dev/null 2>&1; then + timeout "$_bd_timeout" bd hooks run post-merge "$@" + _bd_exit=$? + if [ $_bd_exit -eq 124 ]; then + echo >&2 "beads: hook 'post-merge' timed out after ${_bd_timeout}s — continuing without beads" + _bd_exit=0 + fi + else + bd hooks run post-merge "$@" + _bd_exit=$? + fi + if [ $_bd_exit -eq 3 ]; then + echo >&2 "beads: database not initialized — skipping hook 'post-merge'" + _bd_exit=0 + fi + if [ $_bd_exit -ne 0 ]; then exit $_bd_exit; fi +fi +# --- END BEADS INTEGRATION v1.0.0 --- diff --git a/.beads/hooks/pre-commit b/.beads/hooks/pre-commit new file mode 100755 index 0000000..02cf2ac --- /dev/null +++ b/.beads/hooks/pre-commit @@ -0,0 +1,24 @@ +#!/usr/bin/env sh +# --- BEGIN BEADS INTEGRATION v1.0.0 --- +# This section is managed by beads. Do not remove these markers. 
+if command -v bd >/dev/null 2>&1; then + export BD_GIT_HOOK=1 + _bd_timeout=${BEADS_HOOK_TIMEOUT:-300} + if command -v timeout >/dev/null 2>&1; then + timeout "$_bd_timeout" bd hooks run pre-commit "$@" + _bd_exit=$? + if [ $_bd_exit -eq 124 ]; then + echo >&2 "beads: hook 'pre-commit' timed out after ${_bd_timeout}s — continuing without beads" + _bd_exit=0 + fi + else + bd hooks run pre-commit "$@" + _bd_exit=$? + fi + if [ $_bd_exit -eq 3 ]; then + echo >&2 "beads: database not initialized — skipping hook 'pre-commit'" + _bd_exit=0 + fi + if [ $_bd_exit -ne 0 ]; then exit $_bd_exit; fi +fi +# --- END BEADS INTEGRATION v1.0.0 --- diff --git a/.beads/hooks/pre-push b/.beads/hooks/pre-push new file mode 100755 index 0000000..7918492 --- /dev/null +++ b/.beads/hooks/pre-push @@ -0,0 +1,24 @@ +#!/usr/bin/env sh +# --- BEGIN BEADS INTEGRATION v1.0.0 --- +# This section is managed by beads. Do not remove these markers. +if command -v bd >/dev/null 2>&1; then + export BD_GIT_HOOK=1 + _bd_timeout=${BEADS_HOOK_TIMEOUT:-300} + if command -v timeout >/dev/null 2>&1; then + timeout "$_bd_timeout" bd hooks run pre-push "$@" + _bd_exit=$? + if [ $_bd_exit -eq 124 ]; then + echo >&2 "beads: hook 'pre-push' timed out after ${_bd_timeout}s — continuing without beads" + _bd_exit=0 + fi + else + bd hooks run pre-push "$@" + _bd_exit=$? + fi + if [ $_bd_exit -eq 3 ]; then + echo >&2 "beads: database not initialized — skipping hook 'pre-push'" + _bd_exit=0 + fi + if [ $_bd_exit -ne 0 ]; then exit $_bd_exit; fi +fi +# --- END BEADS INTEGRATION v1.0.0 --- diff --git a/.beads/hooks/prepare-commit-msg b/.beads/hooks/prepare-commit-msg new file mode 100755 index 0000000..c0c3ce1 --- /dev/null +++ b/.beads/hooks/prepare-commit-msg @@ -0,0 +1,24 @@ +#!/usr/bin/env sh +# --- BEGIN BEADS INTEGRATION v1.0.0 --- +# This section is managed by beads. Do not remove these markers. 
+if command -v bd >/dev/null 2>&1; then + export BD_GIT_HOOK=1 + _bd_timeout=${BEADS_HOOK_TIMEOUT:-300} + if command -v timeout >/dev/null 2>&1; then + timeout "$_bd_timeout" bd hooks run prepare-commit-msg "$@" + _bd_exit=$? + if [ $_bd_exit -eq 124 ]; then + echo >&2 "beads: hook 'prepare-commit-msg' timed out after ${_bd_timeout}s — continuing without beads" + _bd_exit=0 + fi + else + bd hooks run prepare-commit-msg "$@" + _bd_exit=$? + fi + if [ $_bd_exit -eq 3 ]; then + echo >&2 "beads: database not initialized — skipping hook 'prepare-commit-msg'" + _bd_exit=0 + fi + if [ $_bd_exit -ne 0 ]; then exit $_bd_exit; fi +fi +# --- END BEADS INTEGRATION v1.0.0 --- diff --git a/.beads/metadata.json b/.beads/metadata.json new file mode 100644 index 0000000..2a5d0ad --- /dev/null +++ b/.beads/metadata.json @@ -0,0 +1,7 @@ +{ + "database": "dolt", + "backend": "dolt", + "dolt_mode": "embedded", + "dolt_database": "claude_memory_mcp", + "project_id": "83137e85-8f85-4af8-9aec-434bc052dc75" +} \ No newline at end of file diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 0000000..963a538 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,26 @@ +{ + "hooks": { + "PreCompact": [ + { + "hooks": [ + { + "command": "bd prime", + "type": "command" + } + ], + "matcher": "" + } + ], + "SessionStart": [ + { + "hooks": [ + { + "command": "bd prime", + "type": "command" + } + ], + "matcher": "" + } + ] + } +} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 1662af9..fb63210 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,7 @@ docker/pgdata/ *.db *.sqlite3 .hypothesis/ + +# Beads / Dolt files (added by bd init) +.dolt/ +.beads-credential-key diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..9390d72 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,84 @@ +# Agent Instructions + +This project uses **bd** (beads) for issue tracking. Run `bd prime` for full workflow context. 
+ +## Quick Reference + +```bash +bd ready # Find available work +bd show <id> # View issue details +bd update <id> --claim # Claim work atomically +bd close <id> # Complete work +bd dolt push # Push beads data to remote +``` + +## Non-Interactive Shell Commands + +**ALWAYS use non-interactive flags** with file operations to avoid hanging on confirmation prompts. + +Shell commands like `cp`, `mv`, and `rm` may be aliased to include `-i` (interactive) mode on some systems, causing the agent to hang indefinitely waiting for y/n input. + +**Use these forms instead:** +```bash +# Force overwrite without prompting +cp -f source dest # NOT: cp source dest +mv -f source dest # NOT: mv source dest +rm -f file # NOT: rm file + +# For recursive operations +rm -rf directory # NOT: rm -r directory +cp -rf source dest # NOT: cp -r source dest +``` + +**Other commands that may prompt:** +- `scp` - use `-o BatchMode=yes` for non-interactive +- `ssh` - use `-o BatchMode=yes` to fail instead of prompting +- `apt-get` - use `-y` flag +- `brew` - use `HOMEBREW_NO_AUTO_UPDATE=1` env var + + +## Beads Issue Tracker + +This project uses **bd (beads)** for issue tracking. Run `bd prime` to see full workflow context and commands. + +### Quick Reference + +```bash +bd ready # Find available work +bd show <id> # View issue details +bd update <id> --claim # Claim work +bd close <id> # Complete work +``` + +### Rules + +- Use `bd` for ALL task tracking — do NOT use TodoWrite, TaskCreate, or markdown TODO lists +- Run `bd prime` for detailed command reference and session close protocol +- Use `bd remember` for persistent knowledge — do NOT use MEMORY.md files + +## Session Completion + +**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds. + +**MANDATORY WORKFLOW:** + +1. **File issues for remaining work** - Create issues for anything that needs follow-up +2. **Run quality gates** (if code changed) - Tests, linters, builds +3.
**Update issue status** - Close finished work, update in-progress items +4. **PUSH TO REMOTE** - This is MANDATORY: + ```bash + git pull --rebase + bd dolt push + git push + git status # MUST show "up to date with origin" + ``` +5. **Clean up** - Clear stashes, prune remote branches +6. **Verify** - All changes committed AND pushed +7. **Hand off** - Provide context for next session + +**CRITICAL RULES:** +- Work is NOT complete until `git push` succeeds +- NEVER stop before pushing - that leaves work stranded locally +- NEVER say "ready to push when you are" - YOU must push +- If push fails, resolve and retry until it succeeds + diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..50af487 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,69 @@ +# Project Instructions for AI Agents + +This file provides instructions and context for AI coding agents working on this project. + + +## Beads Issue Tracker + +This project uses **bd (beads)** for issue tracking. Run `bd prime` to see full workflow context and commands. + +### Quick Reference + +```bash +bd ready # Find available work +bd show <id> # View issue details +bd update <id> --claim # Claim work +bd close <id> # Complete work +``` + +### Rules + +- Use `bd` for ALL task tracking — do NOT use TodoWrite, TaskCreate, or markdown TODO lists +- Run `bd prime` for detailed command reference and session close protocol +- Use `bd remember` for persistent knowledge — do NOT use MEMORY.md files + +## Session Completion + +**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds. + +**MANDATORY WORKFLOW:** + +1. **File issues for remaining work** - Create issues for anything that needs follow-up +2. **Run quality gates** (if code changed) - Tests, linters, builds +3. **Update issue status** - Close finished work, update in-progress items +4.
**PUSH TO REMOTE** - This is MANDATORY: + ```bash + git pull --rebase + bd dolt push + git push + git status # MUST show "up to date with origin" + ``` +5. **Clean up** - Clear stashes, prune remote branches +6. **Verify** - All changes committed AND pushed +7. **Hand off** - Provide context for next session + +**CRITICAL RULES:** +- Work is NOT complete until `git push` succeeds +- NEVER stop before pushing - that leaves work stranded locally +- NEVER say "ready to push when you are" - YOU must push +- If push fails, resolve and retry until it succeeds + + + +## Build & Test + +_Add your build and test commands here_ + +```bash +# Example: +# npm install +# npm test +``` + +## Architecture Overview + +_Add a brief overview of your project architecture_ + +## Conventions & Patterns + +_Add your project-specific conventions here_ From 516d08f43a5fabdbe19af0be130a9df11e485815 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Thu, 7 May 2026 15:58:55 +0000 Subject: [PATCH 09/10] [ci] Switch build to Woodpecker dual-push (DockerHub + Forgejo) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 1 of the registry consolidation rolling out across the homelab — infra/docs/plans/2026-05-07-forgejo-registry-consolidation-plan.md. * New .woodpecker/build.yml runs the test suite, then dual-pushes to viktorbarzin/claude-memory-mcp on DockerHub AND forgejo.viktorbarzin.me/viktor/claude-memory-mcp. * GHA ci.yml renamed to .disabled — its build job would otherwise race the Woodpecker build and clobber Forgejo with a stale image. Re-enable only on rollback. * DockerHub remains the canonical pull source until Phase 3 flips infra/stacks/claude-memory/main.tf image= to Forgejo. Phase 3 also archives this GitHub repo and CLAUDE.md is updated to point `claude plugins install` at the Forgejo URL. 
Co-Authored-By: Claude Opus 4.7 --- .github/workflows/{ci.yml => ci.yml.disabled} | 0 .woodpecker/build.yml | 51 +++++++++++++++++++ 2 files changed, 51 insertions(+) rename .github/workflows/{ci.yml => ci.yml.disabled} (100%) create mode 100644 .woodpecker/build.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml.disabled similarity index 100% rename from .github/workflows/ci.yml rename to .github/workflows/ci.yml.disabled diff --git a/.woodpecker/build.yml b/.woodpecker/build.yml new file mode 100644 index 0000000..87b3312 --- /dev/null +++ b/.woodpecker/build.yml @@ -0,0 +1,51 @@ +when: + event: push + branch: [main, master] + +clone: + git: + image: woodpeckerci/plugin-git + settings: + attempts: 5 + backoff: 10s + +steps: + - name: test + image: python:3.12-slim + commands: + - pip install --no-cache-dir uv + - uv sync --all-extras + - uv run ruff check src/ tests/ + - uv run mypy src/claude_memory/ + - uv run pytest tests/ -v --tb=short + + - name: build-and-push + image: woodpeckerci/plugin-docker-buildx + depends_on: + - test + settings: + # Dual-push during the Forgejo registry consolidation bake. DockerHub + # stays as the canonical pull target until Phase 3 flips + # infra/stacks/claude-memory/main.tf to Forgejo. The GHA build (CI) + # workflow is .disabled until rollback — see + # docs/plans/2026-05-07-forgejo-registry-consolidation-plan.md + # § "Risk register". + repo: + - viktorbarzin/claude-memory-mcp + - forgejo.viktorbarzin.me/viktor/claude-memory-mcp + logins: + - registry: docker.io + username: + from_secret: dockerhub_username + password: + from_secret: dockerhub_token + - registry: forgejo.viktorbarzin.me + username: + from_secret: forgejo_user + password: + from_secret: forgejo_push_token + dockerfile: docker/Dockerfile + context: . 
+ auto_tag: true + platforms: + - linux/amd64 From 377e89ead5b591790a12d9d64c7fe6001318a947 Mon Sep 17 00:00:00 2001 From: Viktor Barzin Date: Thu, 7 May 2026 23:27:08 +0000 Subject: [PATCH 10/10] trigger after WP forge timeout fix