Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
*.py[cod]
__pycache__/
.pytest_cache/
.testmondata*
.coverage
htmlcov/

Expand Down
408 changes: 408 additions & 0 deletions AGENTS.md

Large diffs are not rendered by default.

389 changes: 0 additions & 389 deletions CLAUDE.md

This file was deleted.

1 change: 1 addition & 0 deletions CLAUDE.md
14 changes: 14 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -496,27 +496,41 @@ just test
- `just test-int-postgres` - Run integration tests against Postgres
- `just test-windows` - Run Windows-specific tests (auto-skips on other platforms)
- `just test-benchmark` - Run performance benchmark tests
- `just testmon` - Run tests impacted by recent changes (pytest-testmon)
- `just test-smoke` - Run fast MCP end-to-end smoke test
- `just fast-check` - Run fix/format/typecheck + impacted tests + smoke test
- `just doctor` - Run local file <-> DB consistency checks with temp config

**Postgres Testing:**

Postgres tests use [testcontainers](https://testcontainers-python.readthedocs.io/), which automatically spins up a Postgres instance in Docker. No manual database setup is required — just make sure Docker is running.

**Testmon Note:** When no files have changed, `just testmon` may collect 0 tests. That's expected and means no impacted tests were detected.

**Test Markers:**

Tests use pytest markers for selective execution:
- `windows` - Windows-specific database optimizations
- `benchmark` - Performance tests (excluded from default runs)
- `smoke` - Fast MCP end-to-end smoke tests

**Other Development Commands:**
```bash
just install # Install with dev dependencies
just lint # Run linting checks
just typecheck # Run type checking
just format # Format code with ruff
just fast-check # Fast local loop (fix/format/typecheck + testmon + smoke)
just doctor # Local consistency check (temp config)
just check # Run all quality checks
just migration "msg" # Create database migration
```

**Local Consistency Check:**
```bash
basic-memory doctor # Verifies file <-> database sync in a temp project
```

See the [justfile](justfile) for the complete list of development commands.

## License
Expand Down
28 changes: 28 additions & 0 deletions justfile
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,22 @@ test-int-postgres:
BASIC_MEMORY_TEST_POSTGRES=1 uv run pytest -p pytest_mock -v --no-cov test-int
fi

# Run only tests impacted by recent changes (requires pytest-testmon).
# NOTE: when nothing has changed, testmon may legitimately select 0 tests.
testmon *args:
BASIC_MEMORY_ENV=test uv run pytest -p pytest_mock -v --no-cov --testmon --testmon-forceselect {{args}}

# Run the fast MCP end-to-end smoke test (write -> read -> search -> build_context).
test-smoke:
BASIC_MEMORY_ENV=test uv run pytest -p pytest_mock -v --no-cov -m smoke test-int/mcp/test_smoke_integration.py

# Fast local loop: auto-fix lint, format, typecheck, impacted tests (testmon), and the MCP smoke test
fast-check:
just fix
just format
just typecheck
just testmon
just test-smoke

# Reset Postgres test database (drops and recreates schema)
# Useful when Alembic migration state gets out of sync during development
# Uses credentials from docker-compose-postgres.yml
Expand Down Expand Up @@ -149,6 +165,18 @@ format:
run-inspector:
npx @modelcontextprotocol/inspector

# Run doctor checks in an isolated temp home/config.
# Uses a throwaway HOME and config dir so the check never touches real user data.
doctor:
    #!/usr/bin/env bash
    set -euo pipefail
    TMP_HOME=$(mktemp -d)
    TMP_CONFIG=$(mktemp -d)
    # Remove the temp directories on exit (success or failure) — without this
    # every doctor run leaked two mktemp directories.
    trap 'rm -rf "$TMP_HOME" "$TMP_CONFIG"' EXIT
    HOME="$TMP_HOME" \
    BASIC_MEMORY_ENV=test \
    BASIC_MEMORY_HOME="$TMP_HOME/basic-memory" \
    BASIC_MEMORY_CONFIG_DIR="$TMP_CONFIG" \
    ./.venv/bin/python -m basic_memory.cli.main doctor --local


# Update all dependencies to latest versions
update-deps:
Expand Down
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ markers = [
"slow: Slow-running tests (deselect with '-m \"not slow\"')",
"postgres: Tests that run against Postgres backend (deselect with '-m \"not postgres\"')",
"windows: Windows-specific tests (deselect with '-m \"not windows\"')",
"smoke: Fast end-to-end smoke tests for MCP flows",
]

[tool.ruff]
Expand All @@ -91,6 +92,7 @@ dev = [
"testcontainers[postgres]>=4.0.0",
"psycopg>=3.2.0",
"pyright>=1.1.408",
"pytest-testmon>=2.2.0",
]

[tool.hatch.version]
Expand Down
2 changes: 1 addition & 1 deletion src/basic_memory/cli/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def app_callback(
# Skip for 'mcp' command - it has its own lifespan that handles initialization
# Skip for API-using commands (status, sync, etc.) - they handle initialization via deps.py
# Skip for 'reset' command - it manages its own database lifecycle
skip_init_commands = {"mcp", "status", "sync", "project", "tool", "reset"}
skip_init_commands = {"doctor", "mcp", "status", "sync", "project", "tool", "reset"}
if (
not version
and ctx.invoked_subcommand is not None
Expand Down
3 changes: 2 additions & 1 deletion src/basic_memory/cli/commands/__init__.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
"""CLI commands for basic-memory."""

from . import status, db, import_memory_json, mcp, import_claude_conversations
from . import status, db, doctor, import_memory_json, mcp, import_claude_conversations
from . import import_claude_projects, import_chatgpt, tool, project, format

__all__ = [
"status",
"db",
"doctor",
"import_memory_json",
"mcp",
"import_claude_conversations",
Expand Down
153 changes: 153 additions & 0 deletions src/basic_memory/cli/commands/doctor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
"""Doctor command for local consistency checks."""

from __future__ import annotations

import tempfile
import uuid
from pathlib import Path

from loguru import logger
from mcp.server.fastmcp.exceptions import ToolError
from rich.console import Console
import typer

from basic_memory.cli.app import app
from basic_memory.cli.commands.command_utils import run_with_cleanup
from basic_memory.cli.commands.routing import force_routing, validate_routing_flags
from basic_memory.markdown.entity_parser import EntityParser
from basic_memory.markdown.markdown_processor import MarkdownProcessor
from basic_memory.markdown.schemas import EntityFrontmatter, EntityMarkdown
from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.clients import KnowledgeClient, ProjectClient, SearchClient
from basic_memory.mcp.tools.utils import call_post
from basic_memory.schemas.base import Entity
from basic_memory.schemas.project_info import ProjectInfoRequest
from basic_memory.schemas.search import SearchQuery
from basic_memory.schemas import SyncReportResponse

console = Console()


async def run_doctor() -> None:
    """Run local consistency checks for file <-> database flows.

    Creates a throwaway project in a temp directory, then verifies both
    sync directions:
      1. DB -> file: an entity created via the API materializes on disk.
      2. File -> DB: a hand-written markdown file is picked up by a forced
         sync and becomes visible in search.
    Finally confirms the project status is clean. The doctor project is
    deleted in all cases; any failed check raises ValueError with a
    human-readable message (the CLI wrapper turns that into exit code 1).
    """
    console.print("[blue]Running Basic Memory doctor checks...[/blue]")

    # Unique project name so concurrent/repeated runs never collide.
    project_name = f"doctor-{uuid.uuid4().hex[:8]}"
    api_note_title = "Doctor API Note"
    manual_note_title = "Doctor Manual Note"
    manual_permalink = "doctor/manual-note"

    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        async with get_client() as client:
            project_client = ProjectClient(client)
            project_request = ProjectInfoRequest(
                name=project_name,
                path=str(temp_path),
                set_default=False,
            )

            # Set only after a successful create; gates cleanup in `finally`.
            project_id: str | None = None

            try:
                status = await project_client.create_project(project_request.model_dump())
                if not status.new_project:
                    raise ValueError("Failed to create doctor project")
                project_id = status.new_project.external_id
                console.print(f"[green]OK[/green] Created doctor project: {project_name}")

                # --- DB -> File: create an entity via API ---
                knowledge_client = KnowledgeClient(client, project_id)
                api_note = Entity(
                    title=api_note_title,
                    directory="doctor",
                    entity_type="note",
                    content_type="text/markdown",
                    content=f"# {api_note_title}\n\n- [note] API to file check",
                    entity_metadata={"tags": ["doctor"]},
                )
                # fast=False: wait for the write to complete so the file
                # exists before we check for it below.
                api_result = await knowledge_client.create_entity(api_note.model_dump(), fast=False)

                # file_path is relative to the project root (the temp dir).
                api_file = temp_path / api_result.file_path
                if not api_file.exists():
                    raise ValueError(f"API note file missing: {api_result.file_path}")

                api_text = api_file.read_text(encoding="utf-8")
                if api_note_title not in api_text:
                    raise ValueError("API note content missing from file")

                console.print("[green]OK[/green] API write created file")

                # --- File -> DB: write markdown file directly, then sync ---
                parser = EntityParser(temp_path)
                processor = MarkdownProcessor(parser)
                manual_markdown = EntityMarkdown(
                    frontmatter=EntityFrontmatter(
                        metadata={
                            "title": manual_note_title,
                            "type": "note",
                            "permalink": manual_permalink,
                            "tags": ["doctor"],
                        }
                    ),
                    content=f"# {manual_note_title}\n\n- [note] File to DB check",
                )

                manual_path = temp_path / "doctor" / "manual-note.md"
                await processor.write_file(manual_path, manual_markdown)
                console.print("[green]OK[/green] Manual file written")

                # Force a full, foreground sync so the manual file is
                # indexed before we query for it.
                sync_response = await call_post(
                    client,
                    f"/v2/projects/{project_id}/sync?force_full=true&run_in_background=false",
                )
                sync_report = SyncReportResponse.model_validate(sync_response.json())
                # At minimum the manual file must register as a change.
                if sync_report.total == 0:
                    raise ValueError("Sync did not detect any changes")

                console.print("[green]OK[/green] Sync indexed manual file")

                search_client = SearchClient(client, project_id)
                search_query = SearchQuery(title=manual_note_title)
                search_results = await search_client.search(
                    search_query.model_dump(), page=1, page_size=5
                )
                if not any(result.title == manual_note_title for result in search_results.results):
                    raise ValueError("Manual note not found in search index")

                console.print("[green]OK[/green] Search confirmed manual file")

                # A clean status (total == 0) means files and DB agree.
                status_response = await call_post(client, f"/v2/projects/{project_id}/status")
                status_report = SyncReportResponse.model_validate(status_response.json())
                if status_report.total != 0:
                    raise ValueError("Project status not clean after sync")

                console.print("[green]OK[/green] Status clean after sync")

            finally:
                # Always remove the doctor project, even when a check failed.
                if project_id:
                    await project_client.delete_project(project_id)

    console.print("[green]Doctor checks passed.[/green]")


@app.command()
def doctor(
    local: bool = typer.Option(
        False, "--local", help="Force local API routing (ignore cloud mode)"
    ),
    cloud: bool = typer.Option(False, "--cloud", help="Force cloud API routing"),
) -> None:
    """Run local consistency checks to verify file/database sync.

    Validates the routing flags, runs :func:`run_doctor` under the selected
    routing, and exits with code 1 on any failure.
    """
    try:
        validate_routing_flags(local, cloud)
        with force_routing(local=local, cloud=cloud):
            run_with_cleanup(run_doctor())
    except (ToolError, ValueError) as e:
        # Expected failures: bad flag combination or a failed doctor check.
        console.print(f"[red]Doctor failed: {e}[/red]")
        # Chain the cause (ruff B904) so tracebacks show the original error.
        raise typer.Exit(code=1) from e
    except Exception as e:
        # Unexpected errors: log full details, surface a short message on stderr.
        logger.error(f"Doctor failed: {e}")
        typer.echo(f"Doctor failed: {e}", err=True)
        raise typer.Exit(code=1) from e  # pragma: no cover
1 change: 1 addition & 0 deletions src/basic_memory/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from basic_memory.cli.commands import ( # noqa: F401 # pragma: no cover
cloud,
db,
doctor,
import_chatgpt,
import_claude_conversations,
import_claude_projects,
Expand Down
55 changes: 55 additions & 0 deletions test-int/mcp/test_smoke_integration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
"""Smoke test for MCP end-to-end flow."""

import pytest
from fastmcp import Client


@pytest.mark.asyncio
@pytest.mark.smoke
async def test_mcp_smoke_flow(mcp_server, app, test_project):
    """Verify write -> read -> search -> build_context works end-to-end."""

    note_title = "Smoke Test Note"
    note_body = "# Smoke Test Note\n\n- [note] MCP smoke flow"

    async with Client(mcp_server) as client:

        def expect_note(result):
            # Every read-style tool should return a single content item
            # that mentions the note we just wrote.
            assert len(result.content) == 1
            assert note_title in result.content[0].text

        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": note_title,
                "directory": "smoke",
                "content": note_body,
                "tags": "smoke,test",
            },
        )

        expect_note(
            await client.call_tool(
                "read_note",
                {"project": test_project.name, "identifier": note_title},
            )
        )

        expect_note(
            await client.call_tool(
                "search_notes",
                {"project": test_project.name, "query": "Smoke Test Note"},
            )
        )

        expect_note(
            await client.call_tool(
                "build_context",
                {"project": test_project.name, "url": "smoke/*"},
            )
        )
8 changes: 5 additions & 3 deletions tests/markdown/test_entity_parser_error_handling.py
Original file line number Diff line number Diff line change
Expand Up @@ -349,7 +349,7 @@ async def test_frontmatter_roundtrip_preserves_user_metadata(tmp_path):
# Create a file with user's custom frontmatter (like the bug report)
test_file = tmp_path / "litnote.md"
content = dedent(
'''
"""
---
title: "My Document Title"
type: litnote
Expand All @@ -360,7 +360,7 @@ async def test_frontmatter_roundtrip_preserves_user_metadata(tmp_path):
---

# Content here...
'''
"""
).strip()
test_file.write_text(content)

Expand All @@ -379,7 +379,9 @@ async def test_frontmatter_roundtrip_preserves_user_metadata(tmp_path):
output = dump_frontmatter(post)

# The output should NOT have duplicate frontmatter or metadata: {} key
assert output.count("---") == 2, "Should have exactly one frontmatter block (two --- delimiters)"
assert output.count("---") == 2, (
"Should have exactly one frontmatter block (two --- delimiters)"
)
assert "metadata:" not in output, "Should not have 'metadata:' key in output"
assert "citekey: authorTitleYear2024" in output, "User's citekey should be preserved"
assert "type: litnote" in output, "User's type should be preserved"
Loading
Loading