feat: auto-split large memories at store time (>500 chars)
When content exceeds 500 chars, it is automatically split into multiple
memories on paragraph boundaries. Each chunk gets the same category, tags
(with a part-N-of-M suffix), keywords, and importance. Removes the old
800 char hard limit from the Pydantic model.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
parent c88dd03cce
commit 73aefda82e
3 changed files with 56 additions and 27 deletions
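For context, the auto-splitting described in the commit message could look roughly like the minimal Python sketch below. The names split_content, tag_chunks, and MAX_CHARS are illustrative assumptions, not identifiers from this repository, and the part-N-of-M suffix is modeled here as an extra tag:

# Minimal sketch of store-time auto-splitting. split_content, tag_chunks,
# and MAX_CHARS are hypothetical names, not taken from this repo.
MAX_CHARS = 500

def split_content(content: str, max_chars: int = MAX_CHARS) -> list[str]:
    """Split content on paragraph boundaries into chunks of at most max_chars."""
    if len(content) <= max_chars:
        return [content]
    chunks: list[str] = []
    current = ""
    for para in content.split("\n\n"):
        candidate = f"{current}\n\n{para}" if current else para
        if len(candidate) <= max_chars:
            current = candidate
            continue
        if current:
            chunks.append(current)
        # Fallback: a single paragraph longer than max_chars is hard-wrapped.
        while len(para) > max_chars:
            chunks.append(para[:max_chars])
            para = para[max_chars:]
        current = para
    if current:
        chunks.append(current)
    return chunks

def tag_chunks(chunks: list[str], tags: list[str]) -> list[list[str]]:
    """Give each chunk the same tags plus a part-N-of-M marker (modeled here as an extra tag)."""
    total = len(chunks)
    return [tags + [f"part-{i}-of-{total}"] for i in range(1, total + 1)]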
@@ -29,15 +29,12 @@ def test_roundtrip_memory_store(mem):
     assert restored.tags == mem.tags
 
 
-@given(content=st.text(min_size=801, max_size=1000))
+@given(content=st.text(min_size=801, max_size=2000))
 @settings(max_examples=20)
-def test_content_over_max_rejected(content):
-    """Content exceeding 800 chars is rejected."""
-    try:
-        MemoryStore(content=content)
-        assert False, "Should have raised ValidationError"
-    except ValidationError:
-        pass
+def test_content_over_500_accepted(content):
+    """Content over 500 chars is accepted by the model (auto-split happens server-side)."""
+    mem = MemoryStore(content=content)
+    assert len(mem.content) > 500
 
 
 @given(importance=st.floats().filter(lambda x: x < 0.0 or x > 1.0).filter(lambda x: x == x))  # exclude NaN
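Assuming a splitter shaped like the sketch above, a complementary Hypothesis property test could pin down the invariant the model no longer enforces, namely that every stored chunk fits the 500 char cap (illustrative only; split_content is hypothetical and not part of this commit's diff):

from hypothesis import given, settings, strategies as st

@given(content=st.text(min_size=501, max_size=2000))
@settings(max_examples=20)
def test_split_chunks_fit_limit(content):
    """Every chunk from the (hypothetical) splitter is at most 500 chars."""
    for chunk in split_content(content):
        assert len(chunk) <= 500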