diff --git a/.gitignore b/.gitignore
index d29b2ed2..d6cf6539 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 *.py[cod]
 __pycache__/
 .pytest_cache/
+.testmondata*
 .coverage
 htmlcov/
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 00000000..e14732bf
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,408 @@
# AGENTS.md - Basic Memory Project Guide

## Project Overview

Basic Memory is a local-first knowledge management system built on the Model Context Protocol (MCP). It enables
bidirectional communication between LLMs (like Claude) and markdown files, creating a personal knowledge graph that can
be traversed using links between documents.

## CODEBASE DEVELOPMENT

### Project information

See the [README.md](README.md) file for a project overview.

### Build and Test Commands

- Install: `just install` or `pip install -e ".[dev]"`
- Run all tests (SQLite + Postgres): `just test`
- Run all tests against SQLite: `just test-sqlite`
- Run all tests against Postgres: `just test-postgres` (uses testcontainers)
- Run unit tests (SQLite): `just test-unit-sqlite`
- Run unit tests (Postgres): `just test-unit-postgres`
- Run integration tests (SQLite): `just test-int-sqlite`
- Run integration tests (Postgres): `just test-int-postgres`
- Run impacted tests: `just testmon` (pytest-testmon)
- Run MCP smoke test: `just test-smoke`
- Fast local loop: `just fast-check`
- Local consistency check: `just doctor`
- Generate HTML coverage: `just coverage`
- Single test: `pytest tests/path/to/test_file.py::test_function_name`
- Run benchmarks: `pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"`
- Lint: `just lint` or `ruff check . --fix`
- Type check: `just typecheck` or `uv run pyright`
- Format: `just format` or `uv run ruff format .`
- Run all code checks: `just check` (runs lint, format, typecheck, test)
- Create db migration: `just migration "Your migration message"`
- Run development MCP Inspector: `just run-inspector`

**Note:** Project requires Python 3.12+ (uses type parameter syntax and `type` aliases introduced in 3.12)

**Postgres Testing:** Uses [testcontainers](https://testcontainers-python.readthedocs.io/) which automatically spins up a Postgres instance in Docker. No manual database setup required - just have Docker running.

**Doctor Note:** `just doctor` runs with a temporary HOME/config so it won't touch your local Basic Memory settings. It leaves temp dirs in `/tmp` (safe to ignore or remove).

**Testmon Note:** When no files have changed, `just testmon` may collect 0 tests. That's expected and means no impacted tests were detected.

### Code/Test/Verify Loop (fast path)

1) **Code:** make changes.
2) **Test:** `just fast-check` (lint/format/typecheck + impacted tests + MCP smoke).
3) **Verify:** `just doctor` (end-to-end file ↔ DB loop in a temp project).
4) **Full gate (when needed):** `just test` or `just check` for SQLite + Postgres.

If testmon is "cold", the first run may be long. Subsequent runs get much faster.
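
A typical pass through this loop, as a concrete sketch (commands come from the Build and Test list above; the single-test path is just an example from this repo's suite):

```bash
# 1. Code: make a change, then run the fast gate
just fast-check

# 2. While debugging, target one test directly (path and test name are illustrative)
pytest tests/markdown/test_entity_parser_error_handling.py::test_frontmatter_roundtrip_preserves_user_metadata

# 3. Verify the file <-> DB loop end to end (uses a throwaway temp config)
just doctor

# 4. Full gate before pushing: SQLite + Postgres (Docker required for Postgres)
just test
```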

### Test Structure

- `tests/` - Unit tests for individual components (mocked, fast)
- `test-int/` - Integration tests for real-world scenarios (no mocks, realistic)
- Both directories are covered by unified coverage reporting
- Benchmark tests in `test-int/` are marked with `@pytest.mark.benchmark`
- Slow tests are marked with `@pytest.mark.slow`
- Smoke tests are marked with `@pytest.mark.smoke`

### Code Style Guidelines

- Line length: 100 characters max
- Python 3.12+ with full type annotations (uses type parameters and type aliases)
- Format with ruff (consistent styling)
- Import order: standard lib, third-party, local imports
- Naming: snake_case for functions/variables, PascalCase for classes
- Prefer async patterns with SQLAlchemy 2.0
- Use Pydantic v2 for data validation and schemas
- CLI uses Typer for command structure
- API uses FastAPI for endpoints
- Follow the repository pattern for data access
- Tools communicate with API routers via the httpx ASGI client (in process)

### Code Change Guidelines

- **Full file read before edits**: Before editing any file, read it in full first to ensure complete context; partial reads lead to corrupted edits
- **Minimize diffs**: Prefer the smallest change that satisfies the request. Avoid unrelated refactors or style rewrites unless necessary for correctness
- **No speculative getattr**: Never use `getattr(obj, "attr", default)` when unsure about attribute names. Check the class definition or source code first
- **Fail fast**: Write code with fail-fast logic by default. Do not swallow exceptions by downgrading them to logged errors or warnings
- **No fallback logic**: Do not add fallback logic unless explicitly told to and agreed with the user
- **No guessing**: Do not say "The issue is..." before you actually know what the issue is. Investigate first.

### Literate Programming Style

Code should tell a story. Comments must explain the "why" and narrative flow, not just the "what".

**Section Headers:**
For files with multiple phases of logic, add section headers so the control flow reads like chapters:
```python
# --- Authentication ---
# ... auth logic ...

# --- Data Validation ---
# ... validation logic ...

# --- Business Logic ---
# ... core logic ...
```

**Decision Point Comments:**
For conditionals that materially change behavior (gates, fallbacks, retries, feature flags), add comments with:
- **Trigger**: what condition causes this branch
- **Why**: the rationale (cost, correctness, UX, determinism)
- **Outcome**: what changes downstream

```python
# Trigger: project has no active sync watcher
# Why: avoid duplicate file system watchers consuming resources
# Outcome: starts new watcher, registers in active_watchers dict
if project_id not in active_watchers:
    start_watcher(project_id)
```

**Constraint Comments:**
If code exists because of a constraint (async requirements, rate limits, schema compatibility), explain the constraint near the code:
```python
# SQLite requires WAL mode for concurrent read/write access
connection.execute("PRAGMA journal_mode=WAL")
```

**What NOT to Comment:**
Avoid comments that restate obvious code:
```python
# Bad - restates code
counter += 1  # increment counter

# Good - explains why
counter += 1  # track retries for backoff calculation
```

### Codebase Architecture

See [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) for detailed architecture documentation.

**Directory Structure:**
- `/alembic` - Alembic db migrations
- `/api` - FastAPI REST endpoints + `container.py` composition root
- `/cli` - Typer CLI + `container.py` composition root
- `/deps` - Feature-scoped FastAPI dependencies (config, db, projects, repositories, services, importers)
- `/importers` - Import functionality for Claude, ChatGPT, and other sources
- `/markdown` - Markdown parsing and processing
- `/mcp` - MCP server + `container.py` composition root + `clients/` typed API clients
- `/models` - SQLAlchemy ORM models
- `/repository` - Data access layer
- `/schemas` - Pydantic models for validation
- `/services` - Business logic layer
- `/sync` - File synchronization services + `coordinator.py` for lifecycle management

**Composition Roots:**
Each entrypoint (API, MCP, CLI) has a composition root that:
- Reads `ConfigManager` (the only place that reads global config)
- Resolves runtime mode via `RuntimeMode` enum (TEST > CLOUD > LOCAL)
- Provides dependencies to downstream code explicitly

**Typed API Clients (MCP):**
MCP tools use typed clients in `mcp/clients/` to communicate with the API:
- `KnowledgeClient` - Entity CRUD operations
- `SearchClient` - Search operations
- `MemoryClient` - Context building
- `DirectoryClient` - Directory listing
- `ResourceClient` - Resource reading
- `ProjectClient` - Project management

Flow: MCP Tool → Typed Client → HTTP API → Router → Service → Repository

### Development Notes

- MCP tools are defined in src/basic_memory/mcp/tools/
- MCP prompts are defined in src/basic_memory/mcp/prompts/
- MCP tools should be atomic, composable operations
- Use `textwrap.dedent()` for multi-line string formatting in prompts and tools
- MCP Prompts are used to invoke tools and format content with instructions for an LLM
- Schema changes require Alembic migrations
- SQLite is used for indexing and full-text search; files are the source of truth
- Testing uses pytest with asyncio support (strict mode)
- Unit tests (`tests/`) use mocks when necessary; integration tests (`test-int/`) use real implementations
- By default, tests run against SQLite (fast, no Docker needed)
- Set `BASIC_MEMORY_TEST_POSTGRES=1` to run against Postgres (uses testcontainers - Docker required)
- Each test runs in a standalone environment with an isolated database and tmp_path directory
- CI runs SQLite and Postgres tests in parallel for faster feedback
- Performance benchmarks are in `test-int/test_sync_performance_benchmark.py`
- Use pytest markers: `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
- **Coverage must stay at 100%**: Write tests for new code. Only use `# pragma: no cover` when tests would require excessive mocking (e.g., TYPE_CHECKING blocks, error handlers that need failure injection, runtime-mode-dependent code paths)

### Async Client Pattern (Important!)
+ +**All MCP tools and CLI commands use the context manager pattern for HTTP clients:** + +```python +from basic_memory.mcp.async_client import get_client + +async def my_mcp_tool(): + async with get_client() as client: + # Use client for API calls + response = await call_get(client, "/path") + return response +``` + +**Do NOT use:** +- ❌ `from basic_memory.mcp.async_client import client` (deprecated module-level client) +- ❌ Manual auth header management +- ❌ `inject_auth_header()` (deleted) + +**Key principles:** +- Auth happens at client creation, not per-request +- Proper resource management via context managers +- Supports three modes: Local (ASGI), CLI cloud (HTTP + auth), Cloud app (factory injection) +- Factory pattern enables dependency injection for cloud consolidation + +**For cloud app integration:** +```python +from basic_memory.mcp import async_client + +# Set custom factory before importing tools +async_client.set_client_factory(your_custom_factory) +``` + +See SPEC-16 for full context manager refactor details. + +## BASIC MEMORY PRODUCT USAGE + +### Knowledge Structure + +- Entity: Any concept, document, or idea represented as a markdown file +- Observation: A categorized fact about an entity (`- [category] content`) +- Relation: A directional link between entities (`- relation_type [[Target]]`) +- Frontmatter: YAML metadata at the top of markdown files +- Knowledge representation follows precise markdown format: + - Observations with [category] prefixes + - Relations with WikiLinks [[Entity]] + - Frontmatter with metadata + +### Basic Memory Commands + +**Local Commands:** +- Check sync status: `basic-memory status` +- Doctor check (file <-> DB loop): `basic-memory doctor` +- Import from Claude: `basic-memory import claude conversations` +- Import from ChatGPT: `basic-memory import chatgpt` +- Import from Memory JSON: `basic-memory import memory-json` +- Tool access: `basic-memory tool` (provides CLI access to MCP tools) + - Continue: `basic-memory tool continue-conversation --topic="search"` + +**Project Management:** +- List projects: `basic-memory project list` +- Add project: `basic-memory project add "name" ~/path` +- Project info: `basic-memory project info` +- One-way sync (local -> cloud): `basic-memory project sync` +- Bidirectional sync: `basic-memory project bisync` +- Integrity check: `basic-memory project check` + +**Cloud Commands (requires subscription):** +- Authenticate: `basic-memory cloud login` +- Logout: `basic-memory cloud logout` +- Check cloud status: `basic-memory cloud status` +- Setup cloud sync: `basic-memory cloud setup` +- Manage snapshots: `basic-memory cloud snapshot [create|list|delete|show|browse]` +- Restore from snapshot: `basic-memory cloud restore --snapshot ` + +### MCP Capabilities + +- Basic Memory exposes these MCP tools to LLMs: + + **Content Management:** + - `write_note(title, content, directory, tags)` - Create/update markdown notes with semantic observations and relations + - `read_note(identifier, page, page_size)` - Read notes by title, permalink, or memory:// URL with knowledge graph awareness + - `read_content(path)` - Read raw file content (text, images, binaries) without knowledge graph processing + - `view_note(identifier, page, page_size)` - View notes as formatted artifacts for better readability + - `edit_note(identifier, operation, content)` - Edit notes incrementally (append, prepend, find/replace, replace_section) + - `move_note(identifier, destination_path, is_directory)` - Move notes or directories to new 
locations, updating database and maintaining links + - `delete_note(identifier, is_directory)` - Delete notes or directories from the knowledge base + + **Knowledge Graph Navigation:** + - `build_context(url, depth, timeframe)` - Navigate the knowledge graph via memory:// URLs for conversation continuity + - `recent_activity(type, depth, timeframe)` - Get recently updated information with specified timeframe (e.g., "1d", "1 week") + - `list_directory(dir_name, depth, file_name_glob)` - Browse directory contents with filtering and depth control + + **Search & Discovery:** + - `search_notes(query, page, page_size, search_type, types, entity_types, after_date)` - Full-text search across all content with advanced filtering options + + **Project Management:** + - `list_memory_projects()` - List all available projects with their status + - `create_memory_project(project_name, project_path, set_default)` - Create new Basic Memory projects + - `delete_project(project_name)` - Delete a project from configuration + + **Visualization:** + - `canvas(nodes, edges, title, directory)` - Generate Obsidian canvas files for knowledge graph visualization + + **ChatGPT-Compatible Tools:** + - `search(query)` - Search across knowledge base (OpenAI actions compatible) + - `fetch(id)` - Fetch full content of a search result document + +- MCP Prompts for better AI interaction: + - `ai_assistant_guide()` - Guidance on effectively using Basic Memory tools for AI assistants + - `continue_conversation(topic, timeframe)` - Continue previous conversations with relevant historical context + - `search(query, after_date)` - Search with detailed, formatted results for better context understanding + - `recent_activity(timeframe)` - View recently changed items with formatted output + +### Cloud Features (v0.15.0+) + +Basic Memory now supports cloud synchronization and storage (requires active subscription): + +**Authentication:** +- JWT-based authentication with subscription validation +- Secure session management with token refresh +- Support for multiple cloud projects + +**Bidirectional Sync:** +- rclone bisync integration for two-way synchronization +- Conflict resolution and integrity verification +- Real-time sync with change detection +- Mount/unmount cloud storage for direct file access + +**Cloud Project Management:** +- Create and manage projects in the cloud +- Toggle between local and cloud modes +- Per-project sync configuration +- Subscription-based access control + +**Security & Performance:** +- Removed .env file loading for improved security +- .gitignore integration (respects gitignored files) +- WAL mode for SQLite performance +- Background relation resolution (non-blocking startup) +- API performance optimizations (SPEC-11) + +**CLI Routing Flags:** + +When cloud mode is enabled, CLI commands route to the cloud API by default. 
Use `--local` and `--cloud` flags to override: + +```bash +# Force local routing (ignore cloud mode) +basic-memory status --local +basic-memory project list --local + +# Force cloud routing (when cloud mode is disabled) +basic-memory status --cloud +basic-memory project info my-project --cloud +``` + +Key behaviors: +- The local MCP server (`basic-memory mcp`) automatically uses local routing +- This allows simultaneous use of local Claude Desktop and cloud-based clients +- Some commands (like `project default`, `project sync-config`, `project move`) require `--local` in cloud mode since they modify local configuration +- Environment variable `BASIC_MEMORY_FORCE_LOCAL=true` forces local routing globally + +## AI-Human Collaborative Development + +Basic Memory emerged from and enables a new kind of development process that combines human and AI capabilities. Instead +of using AI just for code generation, we've developed a true collaborative workflow: + +1. AI (LLM) writes initial implementation based on specifications and context +2. Human reviews, runs tests, and commits code with any necessary adjustments +3. Knowledge persists across conversations using Basic Memory's knowledge graph +4. Development continues seamlessly across different AI sessions with consistent context +5. Results improve through iterative collaboration and shared understanding + +This approach has allowed us to tackle more complex challenges and build a more robust system than either humans or AI +could achieve independently. + +**Problem-Solving Guidance:** +- If a solution isn't working after reasonable effort, suggest alternative approaches +- Don't persist with a problematic library or pattern when better alternatives exist +- Example: When py-pglite caused cascading test failures, switching to testcontainers-postgres was the right call + +## GitHub Integration + +Basic Memory has taken AI-Human collaboration to the next level by integrating Claude directly into the development workflow through GitHub: + +### GitHub MCP Tools + +Using the GitHub Model Context Protocol server, Claude can now: + +- **Repository Management**: + - View repository files and structure + - Read file contents + - Create new branches + - Create and update files + +- **Issue Management**: + - Create new issues + - Comment on existing issues + - Close and update issues + - Search across issues + +- **Pull Request Workflow**: + - Create pull requests + - Review code changes + - Add comments to PRs + +This integration enables Claude to participate as a full team member in the development process, not just as a code generation tool. Claude's GitHub account ([bm-claudeai](https://github.com/bm-claudeai)) is a member of the Basic Machines organization with direct contributor access to the codebase. + +### Collaborative Development Process + +With GitHub integration, the development workflow includes: + +1. **Direct code review** - Claude can analyze PRs and provide detailed feedback +2. **Contribution tracking** - All of Claude's contributions are properly attributed in the Git history +3. **Branch management** - Claude can create feature branches for implementations +4. **Documentation maintenance** - Claude can keep documentation updated as the code evolves +5. **Code Commits**: ALWAYS sign off commits with `git commit -s` + +This level of integration represents a new paradigm in AI-human collaboration, where the AI assistant becomes a full-fledged team member rather than just a tool for generating code snippets. 
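
To make the Knowledge Structure section above concrete, here is a minimal sketch of a note in the expected format. The frontmatter keys (title, type, permalink, tags) match what the codebase writes; the note title, categories, relation names, and the `## Observations`/`## Relations` section headings are illustrative, not prescribed:

```markdown
---
title: Coffee Brewing Methods
type: note
permalink: coffee/coffee-brewing-methods
tags:
- coffee
---

# Coffee Brewing Methods

## Observations
- [method] Pour-over gives finer control over extraction time
- [preference] Lighter roasts preserve origin character

## Relations
- pairs_with [[Coffee Bean Origins]]
- part_of [[Morning Routine]]
```

Each observation follows the `- [category] content` form and each relation the `- relation_type [[Target]]` form described under Knowledge Structure.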
diff --git a/CLAUDE.md b/CLAUDE.md deleted file mode 100644 index 2406479a..00000000 --- a/CLAUDE.md +++ /dev/null @@ -1,389 +0,0 @@ -# CLAUDE.md - Basic Memory Project Guide - -## Project Overview - -Basic Memory is a local-first knowledge management system built on the Model Context Protocol (MCP). It enables -bidirectional communication between LLMs (like Claude) and markdown files, creating a personal knowledge graph that can -be traversed using links between documents. - -## CODEBASE DEVELOPMENT - -### Project information - -See the [README.md](README.md) file for a project overview. - -### Build and Test Commands - -- Install: `just install` or `pip install -e ".[dev]"` -- Run all tests (SQLite + Postgres): `just test` -- Run all tests against SQLite: `just test-sqlite` -- Run all tests against Postgres: `just test-postgres` (uses testcontainers) -- Run unit tests (SQLite): `just test-unit-sqlite` -- Run unit tests (Postgres): `just test-unit-postgres` -- Run integration tests (SQLite): `just test-int-sqlite` -- Run integration tests (Postgres): `just test-int-postgres` -- Generate HTML coverage: `just coverage` -- Single test: `pytest tests/path/to/test_file.py::test_function_name` -- Run benchmarks: `pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"` -- Lint: `just lint` or `ruff check . --fix` -- Type check: `just typecheck` or `uv run pyright` -- Format: `just format` or `uv run ruff format .` -- Run all code checks: `just check` (runs lint, format, typecheck, test) -- Create db migration: `just migration "Your migration message"` -- Run development MCP Inspector: `just run-inspector` - -**Note:** Project requires Python 3.12+ (uses type parameter syntax and `type` aliases introduced in 3.12) - -**Postgres Testing:** Uses [testcontainers](https://testcontainers-python.readthedocs.io/) which automatically spins up a Postgres instance in Docker. No manual database setup required - just have Docker running. - -### Test Structure - -- `tests/` - Unit tests for individual components (mocked, fast) -- `test-int/` - Integration tests for real-world scenarios (no mocks, realistic) -- Both directories are covered by unified coverage reporting -- Benchmark tests in `test-int/` are marked with `@pytest.mark.benchmark` -- Slow tests are marked with `@pytest.mark.slow` - -### Code Style Guidelines - -- Line length: 100 characters max -- Python 3.12+ with full type annotations (uses type parameters and type aliases) -- Format with ruff (consistent styling) -- Import order: standard lib, third-party, local imports -- Naming: snake_case for functions/variables, PascalCase for classes -- Prefer async patterns with SQLAlchemy 2.0 -- Use Pydantic v2 for data validation and schemas -- CLI uses Typer for command structure -- API uses FastAPI for endpoints -- Follow the repository pattern for data access -- Tools communicate to api routers via the httpx ASGI client (in process) - -### Code Change Guidelines - -- **Full file read before edits**: Before editing any file, read it in full first to ensure complete context; partial reads lead to corrupted edits -- **Minimize diffs**: Prefer the smallest change that satisfies the request. Avoid unrelated refactors or style rewrites unless necessary for correctness -- **No speculative getattr**: Never use `getattr(obj, "attr", default)` when unsure about attribute names. Check the class definition or source code first -- **Fail fast**: Write code with fail-fast logic by default. 
Do not swallow exceptions with errors or warnings -- **No fallback logic**: Do not add fallback logic unless explicitly told to and agreed with the user -- **No guessing**: Do not say "The issue is..." before you actually know what the issue is. Investigate first. - -### Literate Programming Style - -Code should tell a story. Comments must explain the "why" and narrative flow, not just the "what". - -**Section Headers:** -For files with multiple phases of logic, add section headers so the control flow reads like chapters: -```python -# --- Authentication --- -# ... auth logic ... - -# --- Data Validation --- -# ... validation logic ... - -# --- Business Logic --- -# ... core logic ... -``` - -**Decision Point Comments:** -For conditionals that materially change behavior (gates, fallbacks, retries, feature flags), add comments with: -- **Trigger**: what condition causes this branch -- **Why**: the rationale (cost, correctness, UX, determinism) -- **Outcome**: what changes downstream - -```python -# Trigger: project has no active sync watcher -# Why: avoid duplicate file system watchers consuming resources -# Outcome: starts new watcher, registers in active_watchers dict -if project_id not in active_watchers: - start_watcher(project_id) -``` - -**Constraint Comments:** -If code exists because of a constraint (async requirements, rate limits, schema compatibility), explain the constraint near the code: -```python -# SQLite requires WAL mode for concurrent read/write access -connection.execute("PRAGMA journal_mode=WAL") -``` - -**What NOT to Comment:** -Avoid comments that restate obvious code: -```python -# Bad - restates code -counter += 1 # increment counter - -# Good - explains why -counter += 1 # track retries for backoff calculation -``` - -### Codebase Architecture - -See [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) for detailed architecture documentation. 
- -**Directory Structure:** -- `/alembic` - Alembic db migrations -- `/api` - FastAPI REST endpoints + `container.py` composition root -- `/cli` - Typer CLI + `container.py` composition root -- `/deps` - Feature-scoped FastAPI dependencies (config, db, projects, repositories, services, importers) -- `/importers` - Import functionality for Claude, ChatGPT, and other sources -- `/markdown` - Markdown parsing and processing -- `/mcp` - MCP server + `container.py` composition root + `clients/` typed API clients -- `/models` - SQLAlchemy ORM models -- `/repository` - Data access layer -- `/schemas` - Pydantic models for validation -- `/services` - Business logic layer -- `/sync` - File synchronization services + `coordinator.py` for lifecycle management - -**Composition Roots:** -Each entrypoint (API, MCP, CLI) has a composition root that: -- Reads `ConfigManager` (the only place that reads global config) -- Resolves runtime mode via `RuntimeMode` enum (TEST > CLOUD > LOCAL) -- Provides dependencies to downstream code explicitly - -**Typed API Clients (MCP):** -MCP tools use typed clients in `mcp/clients/` to communicate with the API: -- `KnowledgeClient` - Entity CRUD operations -- `SearchClient` - Search operations -- `MemoryClient` - Context building -- `DirectoryClient` - Directory listing -- `ResourceClient` - Resource reading -- `ProjectClient` - Project management - -Flow: MCP Tool → Typed Client → HTTP API → Router → Service → Repository - -### Development Notes - -- MCP tools are defined in src/basic_memory/mcp/tools/ -- MCP prompts are defined in src/basic_memory/mcp/prompts/ -- MCP tools should be atomic, composable operations -- Use `textwrap.dedent()` for multi-line string formatting in prompts and tools -- MCP Prompts are used to invoke tools and format content with instructions for an LLM -- Schema changes require Alembic migrations -- SQLite is used for indexing and full text search, files are source of truth -- Testing uses pytest with asyncio support (strict mode) -- Unit tests (`tests/`) use mocks when necessary; integration tests (`test-int/`) use real implementations -- By default, tests run against SQLite (fast, no Docker needed) -- Set `BASIC_MEMORY_TEST_POSTGRES=1` to run against Postgres (uses testcontainers - Docker required) -- Each test runs in a standalone environment with isolated database and tmp_path directory -- CI runs SQLite and Postgres tests in parallel for faster feedback -- Performance benchmarks are in `test-int/test_sync_performance_benchmark.py` -- Use pytest markers: `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests -- **Coverage must stay at 100%**: Write tests for new code. Only use `# pragma: no cover` when tests would require excessive mocking (e.g., TYPE_CHECKING blocks, error handlers that need failure injection, runtime-mode-dependent code paths) - -### Async Client Pattern (Important!) 
- -**All MCP tools and CLI commands use the context manager pattern for HTTP clients:** - -```python -from basic_memory.mcp.async_client import get_client - -async def my_mcp_tool(): - async with get_client() as client: - # Use client for API calls - response = await call_get(client, "/path") - return response -``` - -**Do NOT use:** -- ❌ `from basic_memory.mcp.async_client import client` (deprecated module-level client) -- ❌ Manual auth header management -- ❌ `inject_auth_header()` (deleted) - -**Key principles:** -- Auth happens at client creation, not per-request -- Proper resource management via context managers -- Supports three modes: Local (ASGI), CLI cloud (HTTP + auth), Cloud app (factory injection) -- Factory pattern enables dependency injection for cloud consolidation - -**For cloud app integration:** -```python -from basic_memory.mcp import async_client - -# Set custom factory before importing tools -async_client.set_client_factory(your_custom_factory) -``` - -See SPEC-16 for full context manager refactor details. - -## BASIC MEMORY PRODUCT USAGE - -### Knowledge Structure - -- Entity: Any concept, document, or idea represented as a markdown file -- Observation: A categorized fact about an entity (`- [category] content`) -- Relation: A directional link between entities (`- relation_type [[Target]]`) -- Frontmatter: YAML metadata at the top of markdown files -- Knowledge representation follows precise markdown format: - - Observations with [category] prefixes - - Relations with WikiLinks [[Entity]] - - Frontmatter with metadata - -### Basic Memory Commands - -**Local Commands:** -- Check sync status: `basic-memory status` -- Import from Claude: `basic-memory import claude conversations` -- Import from ChatGPT: `basic-memory import chatgpt` -- Import from Memory JSON: `basic-memory import memory-json` -- Tool access: `basic-memory tool` (provides CLI access to MCP tools) - - Continue: `basic-memory tool continue-conversation --topic="search"` - -**Project Management:** -- List projects: `basic-memory project list` -- Add project: `basic-memory project add "name" ~/path` -- Project info: `basic-memory project info` -- One-way sync (local -> cloud): `basic-memory project sync` -- Bidirectional sync: `basic-memory project bisync` -- Integrity check: `basic-memory project check` - -**Cloud Commands (requires subscription):** -- Authenticate: `basic-memory cloud login` -- Logout: `basic-memory cloud logout` -- Check cloud status: `basic-memory cloud status` -- Setup cloud sync: `basic-memory cloud setup` -- Manage snapshots: `basic-memory cloud snapshot [create|list|delete|show|browse]` -- Restore from snapshot: `basic-memory cloud restore --snapshot ` - -### MCP Capabilities - -- Basic Memory exposes these MCP tools to LLMs: - - **Content Management:** - - `write_note(title, content, directory, tags)` - Create/update markdown notes with semantic observations and relations - - `read_note(identifier, page, page_size)` - Read notes by title, permalink, or memory:// URL with knowledge graph awareness - - `read_content(path)` - Read raw file content (text, images, binaries) without knowledge graph processing - - `view_note(identifier, page, page_size)` - View notes as formatted artifacts for better readability - - `edit_note(identifier, operation, content)` - Edit notes incrementally (append, prepend, find/replace, replace_section) - - `move_note(identifier, destination_path, is_directory)` - Move notes or directories to new locations, updating database and maintaining links - - 
`delete_note(identifier, is_directory)` - Delete notes or directories from the knowledge base - - **Knowledge Graph Navigation:** - - `build_context(url, depth, timeframe)` - Navigate the knowledge graph via memory:// URLs for conversation continuity - - `recent_activity(type, depth, timeframe)` - Get recently updated information with specified timeframe (e.g., "1d", "1 week") - - `list_directory(dir_name, depth, file_name_glob)` - Browse directory contents with filtering and depth control - - **Search & Discovery:** - - `search_notes(query, page, page_size, search_type, types, entity_types, after_date)` - Full-text search across all content with advanced filtering options - - **Project Management:** - - `list_memory_projects()` - List all available projects with their status - - `create_memory_project(project_name, project_path, set_default)` - Create new Basic Memory projects - - `delete_project(project_name)` - Delete a project from configuration - - **Visualization:** - - `canvas(nodes, edges, title, directory)` - Generate Obsidian canvas files for knowledge graph visualization - - **ChatGPT-Compatible Tools:** - - `search(query)` - Search across knowledge base (OpenAI actions compatible) - - `fetch(id)` - Fetch full content of a search result document - -- MCP Prompts for better AI interaction: - - `ai_assistant_guide()` - Guidance on effectively using Basic Memory tools for AI assistants - - `continue_conversation(topic, timeframe)` - Continue previous conversations with relevant historical context - - `search(query, after_date)` - Search with detailed, formatted results for better context understanding - - `recent_activity(timeframe)` - View recently changed items with formatted output - -### Cloud Features (v0.15.0+) - -Basic Memory now supports cloud synchronization and storage (requires active subscription): - -**Authentication:** -- JWT-based authentication with subscription validation -- Secure session management with token refresh -- Support for multiple cloud projects - -**Bidirectional Sync:** -- rclone bisync integration for two-way synchronization -- Conflict resolution and integrity verification -- Real-time sync with change detection -- Mount/unmount cloud storage for direct file access - -**Cloud Project Management:** -- Create and manage projects in the cloud -- Toggle between local and cloud modes -- Per-project sync configuration -- Subscription-based access control - -**Security & Performance:** -- Removed .env file loading for improved security -- .gitignore integration (respects gitignored files) -- WAL mode for SQLite performance -- Background relation resolution (non-blocking startup) -- API performance optimizations (SPEC-11) - -**CLI Routing Flags:** - -When cloud mode is enabled, CLI commands route to the cloud API by default. 
Use `--local` and `--cloud` flags to override: - -```bash -# Force local routing (ignore cloud mode) -basic-memory status --local -basic-memory project list --local - -# Force cloud routing (when cloud mode is disabled) -basic-memory status --cloud -basic-memory project info my-project --cloud -``` - -Key behaviors: -- The local MCP server (`basic-memory mcp`) automatically uses local routing -- This allows simultaneous use of local Claude Desktop and cloud-based clients -- Some commands (like `project default`, `project sync-config`, `project move`) require `--local` in cloud mode since they modify local configuration -- Environment variable `BASIC_MEMORY_FORCE_LOCAL=true` forces local routing globally - -## AI-Human Collaborative Development - -Basic Memory emerged from and enables a new kind of development process that combines human and AI capabilities. Instead -of using AI just for code generation, we've developed a true collaborative workflow: - -1. AI (LLM) writes initial implementation based on specifications and context -2. Human reviews, runs tests, and commits code with any necessary adjustments -3. Knowledge persists across conversations using Basic Memory's knowledge graph -4. Development continues seamlessly across different AI sessions with consistent context -5. Results improve through iterative collaboration and shared understanding - -This approach has allowed us to tackle more complex challenges and build a more robust system than either humans or AI -could achieve independently. - -**Problem-Solving Guidance:** -- If a solution isn't working after reasonable effort, suggest alternative approaches -- Don't persist with a problematic library or pattern when better alternatives exist -- Example: When py-pglite caused cascading test failures, switching to testcontainers-postgres was the right call - -## GitHub Integration - -Basic Memory has taken AI-Human collaboration to the next level by integrating Claude directly into the development workflow through GitHub: - -### GitHub MCP Tools - -Using the GitHub Model Context Protocol server, Claude can now: - -- **Repository Management**: - - View repository files and structure - - Read file contents - - Create new branches - - Create and update files - -- **Issue Management**: - - Create new issues - - Comment on existing issues - - Close and update issues - - Search across issues - -- **Pull Request Workflow**: - - Create pull requests - - Review code changes - - Add comments to PRs - -This integration enables Claude to participate as a full team member in the development process, not just as a code generation tool. Claude's GitHub account ([bm-claudeai](https://github.com/bm-claudeai)) is a member of the Basic Machines organization with direct contributor access to the codebase. - -### Collaborative Development Process - -With GitHub integration, the development workflow includes: - -1. **Direct code review** - Claude can analyze PRs and provide detailed feedback -2. **Contribution tracking** - All of Claude's contributions are properly attributed in the Git history -3. **Branch management** - Claude can create feature branches for implementations -4. **Documentation maintenance** - Claude can keep documentation updated as the code evolves -5. **Code Commits**: ALWAYS sign off commits with `git commit -s` - -This level of integration represents a new paradigm in AI-human collaboration, where the AI assistant becomes a full-fledged team member rather than just a tool for generating code snippets. 
diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 00000000..47dc3e3d --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file diff --git a/README.md b/README.md index d5c27a32..af3d3678 100644 --- a/README.md +++ b/README.md @@ -496,16 +496,23 @@ just test - `just test-int-postgres` - Run integration tests against Postgres - `just test-windows` - Run Windows-specific tests (auto-skips on other platforms) - `just test-benchmark` - Run performance benchmark tests +- `just testmon` - Run tests impacted by recent changes (pytest-testmon) +- `just test-smoke` - Run fast MCP end-to-end smoke test +- `just fast-check` - Run fix/format/typecheck + impacted tests + smoke test +- `just doctor` - Run local file <-> DB consistency checks with temp config **Postgres Testing:** Postgres tests use [testcontainers](https://testcontainers-python.readthedocs.io/) which automatically spins up a Postgres instance in Docker. No manual database setup required - just have Docker running. +**Testmon Note:** When no files have changed, `just testmon` may collect 0 tests. That's expected and means no impacted tests were detected. + **Test Markers:** Tests use pytest markers for selective execution: - `windows` - Windows-specific database optimizations - `benchmark` - Performance tests (excluded from default runs) +- `smoke` - Fast MCP end-to-end smoke tests **Other Development Commands:** ```bash @@ -513,10 +520,17 @@ just install # Install with dev dependencies just lint # Run linting checks just typecheck # Run type checking just format # Format code with ruff +just fast-check # Fast local loop (fix/format/typecheck + testmon + smoke) +just doctor # Local consistency check (temp config) just check # Run all quality checks just migration "msg" # Create database migration ``` +**Local Consistency Check:** +```bash +basic-memory doctor # Verifies file <-> database sync in a temp project +``` + See the [justfile](justfile) for the complete list of development commands. 
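
Because the `testmon` recipe forwards extra arguments to pytest (see the justfile change below), the impacted-test run can be scoped further; a small sketch (paths and keyword are illustrative):

```bash
# Limit impacted-test selection to one directory, with a keyword filter
just testmon tests/mcp -k "contract"

# Quieter impacted-test run
just testmon -q
```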
## License diff --git a/justfile b/justfile index 4ed9aea3..673edc05 100644 --- a/justfile +++ b/justfile @@ -62,6 +62,22 @@ test-int-postgres: BASIC_MEMORY_TEST_POSTGRES=1 uv run pytest -p pytest_mock -v --no-cov test-int fi +# Run tests impacted by recent changes (requires pytest-testmon) +testmon *args: + BASIC_MEMORY_ENV=test uv run pytest -p pytest_mock -v --no-cov --testmon --testmon-forceselect {{args}} + +# Run MCP smoke test (fast end-to-end loop) +test-smoke: + BASIC_MEMORY_ENV=test uv run pytest -p pytest_mock -v --no-cov -m smoke test-int/mcp/test_smoke_integration.py + +# Fast local loop: lint, format, typecheck, impacted tests +fast-check: + just fix + just format + just typecheck + just testmon + just test-smoke + # Reset Postgres test database (drops and recreates schema) # Useful when Alembic migration state gets out of sync during development # Uses credentials from docker-compose-postgres.yml @@ -149,6 +165,18 @@ format: run-inspector: npx @modelcontextprotocol/inspector +# Run doctor checks in an isolated temp home/config +doctor: + #!/usr/bin/env bash + set -euo pipefail + TMP_HOME=$(mktemp -d) + TMP_CONFIG=$(mktemp -d) + HOME="$TMP_HOME" \ + BASIC_MEMORY_ENV=test \ + BASIC_MEMORY_HOME="$TMP_HOME/basic-memory" \ + BASIC_MEMORY_CONFIG_DIR="$TMP_CONFIG" \ + ./.venv/bin/python -m basic_memory.cli.main doctor --local + # Update all dependencies to latest versions update-deps: diff --git a/pyproject.toml b/pyproject.toml index 82a54c85..1670c803 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,7 @@ markers = [ "slow: Slow-running tests (deselect with '-m \"not slow\"')", "postgres: Tests that run against Postgres backend (deselect with '-m \"not postgres\"')", "windows: Windows-specific tests (deselect with '-m \"not windows\"')", + "smoke: Fast end-to-end smoke tests for MCP flows", ] [tool.ruff] @@ -91,6 +92,7 @@ dev = [ "testcontainers[postgres]>=4.0.0", "psycopg>=3.2.0", "pyright>=1.1.408", + "pytest-testmon>=2.2.0", ] [tool.hatch.version] diff --git a/src/basic_memory/cli/app.py b/src/basic_memory/cli/app.py index c5a4336a..8a02aea7 100644 --- a/src/basic_memory/cli/app.py +++ b/src/basic_memory/cli/app.py @@ -50,7 +50,7 @@ def app_callback( # Skip for 'mcp' command - it has its own lifespan that handles initialization # Skip for API-using commands (status, sync, etc.) - they handle initialization via deps.py # Skip for 'reset' command - it manages its own database lifecycle - skip_init_commands = {"mcp", "status", "sync", "project", "tool", "reset"} + skip_init_commands = {"doctor", "mcp", "status", "sync", "project", "tool", "reset"} if ( not version and ctx.invoked_subcommand is not None diff --git a/src/basic_memory/cli/commands/__init__.py b/src/basic_memory/cli/commands/__init__.py index 8b98c81c..2a5a5e8c 100644 --- a/src/basic_memory/cli/commands/__init__.py +++ b/src/basic_memory/cli/commands/__init__.py @@ -1,11 +1,12 @@ """CLI commands for basic-memory.""" -from . import status, db, import_memory_json, mcp, import_claude_conversations +from . import status, db, doctor, import_memory_json, mcp, import_claude_conversations from . 
import import_claude_projects, import_chatgpt, tool, project, format __all__ = [ "status", "db", + "doctor", "import_memory_json", "mcp", "import_claude_conversations", diff --git a/src/basic_memory/cli/commands/doctor.py b/src/basic_memory/cli/commands/doctor.py new file mode 100644 index 00000000..5b900cc0 --- /dev/null +++ b/src/basic_memory/cli/commands/doctor.py @@ -0,0 +1,153 @@ +"""Doctor command for local consistency checks.""" + +from __future__ import annotations + +import tempfile +import uuid +from pathlib import Path + +from loguru import logger +from mcp.server.fastmcp.exceptions import ToolError +from rich.console import Console +import typer + +from basic_memory.cli.app import app +from basic_memory.cli.commands.command_utils import run_with_cleanup +from basic_memory.cli.commands.routing import force_routing, validate_routing_flags +from basic_memory.markdown.entity_parser import EntityParser +from basic_memory.markdown.markdown_processor import MarkdownProcessor +from basic_memory.markdown.schemas import EntityFrontmatter, EntityMarkdown +from basic_memory.mcp.async_client import get_client +from basic_memory.mcp.clients import KnowledgeClient, ProjectClient, SearchClient +from basic_memory.mcp.tools.utils import call_post +from basic_memory.schemas.base import Entity +from basic_memory.schemas.project_info import ProjectInfoRequest +from basic_memory.schemas.search import SearchQuery +from basic_memory.schemas import SyncReportResponse + +console = Console() + + +async def run_doctor() -> None: + """Run local consistency checks for file <-> database flows.""" + console.print("[blue]Running Basic Memory doctor checks...[/blue]") + + project_name = f"doctor-{uuid.uuid4().hex[:8]}" + api_note_title = "Doctor API Note" + manual_note_title = "Doctor Manual Note" + manual_permalink = "doctor/manual-note" + + with tempfile.TemporaryDirectory() as temp_dir: + temp_path = Path(temp_dir) + + async with get_client() as client: + project_client = ProjectClient(client) + project_request = ProjectInfoRequest( + name=project_name, + path=str(temp_path), + set_default=False, + ) + + project_id: str | None = None + + try: + status = await project_client.create_project(project_request.model_dump()) + if not status.new_project: + raise ValueError("Failed to create doctor project") + project_id = status.new_project.external_id + console.print(f"[green]OK[/green] Created doctor project: {project_name}") + + # --- DB -> File: create an entity via API --- + knowledge_client = KnowledgeClient(client, project_id) + api_note = Entity( + title=api_note_title, + directory="doctor", + entity_type="note", + content_type="text/markdown", + content=f"# {api_note_title}\n\n- [note] API to file check", + entity_metadata={"tags": ["doctor"]}, + ) + api_result = await knowledge_client.create_entity(api_note.model_dump(), fast=False) + + api_file = temp_path / api_result.file_path + if not api_file.exists(): + raise ValueError(f"API note file missing: {api_result.file_path}") + + api_text = api_file.read_text(encoding="utf-8") + if api_note_title not in api_text: + raise ValueError("API note content missing from file") + + console.print("[green]OK[/green] API write created file") + + # --- File -> DB: write markdown file directly, then sync --- + parser = EntityParser(temp_path) + processor = MarkdownProcessor(parser) + manual_markdown = EntityMarkdown( + frontmatter=EntityFrontmatter( + metadata={ + "title": manual_note_title, + "type": "note", + "permalink": manual_permalink, + "tags": ["doctor"], + } + 
), + content=f"# {manual_note_title}\n\n- [note] File to DB check", + ) + + manual_path = temp_path / "doctor" / "manual-note.md" + await processor.write_file(manual_path, manual_markdown) + console.print("[green]OK[/green] Manual file written") + + sync_response = await call_post( + client, + f"/v2/projects/{project_id}/sync?force_full=true&run_in_background=false", + ) + sync_report = SyncReportResponse.model_validate(sync_response.json()) + if sync_report.total == 0: + raise ValueError("Sync did not detect any changes") + + console.print("[green]OK[/green] Sync indexed manual file") + + search_client = SearchClient(client, project_id) + search_query = SearchQuery(title=manual_note_title) + search_results = await search_client.search( + search_query.model_dump(), page=1, page_size=5 + ) + if not any(result.title == manual_note_title for result in search_results.results): + raise ValueError("Manual note not found in search index") + + console.print("[green]OK[/green] Search confirmed manual file") + + status_response = await call_post(client, f"/v2/projects/{project_id}/status") + status_report = SyncReportResponse.model_validate(status_response.json()) + if status_report.total != 0: + raise ValueError("Project status not clean after sync") + + console.print("[green]OK[/green] Status clean after sync") + + finally: + if project_id: + await project_client.delete_project(project_id) + + console.print("[green]Doctor checks passed.[/green]") + + +@app.command() +def doctor( + local: bool = typer.Option( + False, "--local", help="Force local API routing (ignore cloud mode)" + ), + cloud: bool = typer.Option(False, "--cloud", help="Force cloud API routing"), +) -> None: + """Run local consistency checks to verify file/database sync.""" + try: + validate_routing_flags(local, cloud) + with force_routing(local=local, cloud=cloud): + run_with_cleanup(run_doctor()) + except (ToolError, ValueError) as e: + console.print(f"[red]Doctor failed: {e}[/red]") + raise typer.Exit(code=1) + except Exception as e: + logger.error(f"Doctor failed: {e}") + typer.echo(f"Doctor failed: {e}", err=True) + raise typer.Exit(code=1) # pragma: no cover diff --git a/src/basic_memory/cli/main.py b/src/basic_memory/cli/main.py index 38f4dd03..2e0041d7 100644 --- a/src/basic_memory/cli/main.py +++ b/src/basic_memory/cli/main.py @@ -6,6 +6,7 @@ from basic_memory.cli.commands import ( # noqa: F401 # pragma: no cover cloud, db, + doctor, import_chatgpt, import_claude_conversations, import_claude_projects, diff --git a/test-int/mcp/test_smoke_integration.py b/test-int/mcp/test_smoke_integration.py new file mode 100644 index 00000000..3e38e697 --- /dev/null +++ b/test-int/mcp/test_smoke_integration.py @@ -0,0 +1,55 @@ +"""Smoke test for MCP end-to-end flow.""" + +import pytest +from fastmcp import Client + + +@pytest.mark.asyncio +@pytest.mark.smoke +async def test_mcp_smoke_flow(mcp_server, app, test_project): + """Verify write -> read -> search -> build_context works end-to-end.""" + + async with Client(mcp_server) as client: + title = "Smoke Test Note" + content = "# Smoke Test Note\n\n- [note] MCP smoke flow" + + await client.call_tool( + "write_note", + { + "project": test_project.name, + "title": title, + "directory": "smoke", + "content": content, + "tags": "smoke,test", + }, + ) + + read_result = await client.call_tool( + "read_note", + { + "project": test_project.name, + "identifier": title, + }, + ) + assert len(read_result.content) == 1 + assert title in read_result.content[0].text + + search_result = await 
client.call_tool( + "search_notes", + { + "project": test_project.name, + "query": "Smoke Test Note", + }, + ) + assert len(search_result.content) == 1 + assert title in search_result.content[0].text + + context_result = await client.call_tool( + "build_context", + { + "project": test_project.name, + "url": "smoke/*", + }, + ) + assert len(context_result.content) == 1 + assert title in context_result.content[0].text diff --git a/tests/markdown/test_entity_parser_error_handling.py b/tests/markdown/test_entity_parser_error_handling.py index 63894ee6..d31b8f7e 100644 --- a/tests/markdown/test_entity_parser_error_handling.py +++ b/tests/markdown/test_entity_parser_error_handling.py @@ -349,7 +349,7 @@ async def test_frontmatter_roundtrip_preserves_user_metadata(tmp_path): # Create a file with user's custom frontmatter (like the bug report) test_file = tmp_path / "litnote.md" content = dedent( - ''' + """ --- title: "My Document Title" type: litnote @@ -360,7 +360,7 @@ async def test_frontmatter_roundtrip_preserves_user_metadata(tmp_path): --- # Content here... - ''' + """ ).strip() test_file.write_text(content) @@ -379,7 +379,9 @@ async def test_frontmatter_roundtrip_preserves_user_metadata(tmp_path): output = dump_frontmatter(post) # The output should NOT have duplicate frontmatter or metadata: {} key - assert output.count("---") == 2, "Should have exactly one frontmatter block (two --- delimiters)" + assert output.count("---") == 2, ( + "Should have exactly one frontmatter block (two --- delimiters)" + ) assert "metadata:" not in output, "Should not have 'metadata:' key in output" assert "citekey: authorTitleYear2024" in output, "User's citekey should be preserved" assert "type: litnote" in output, "User's type should be preserved" diff --git a/tests/mcp/test_tool_contracts.py b/tests/mcp/test_tool_contracts.py new file mode 100644 index 00000000..3abdf2b0 --- /dev/null +++ b/tests/mcp/test_tool_contracts.py @@ -0,0 +1,88 @@ +"""Tool contract tests for MCP tool signatures.""" + +from __future__ import annotations + +import inspect + +from basic_memory.mcp import tools + + +EXPECTED_TOOL_SIGNATURES: dict[str, list[str]] = { + "build_context": ["url", "project", "depth", "timeframe", "page", "page_size", "max_related"], + "canvas": ["nodes", "edges", "title", "directory", "project"], + "create_memory_project": ["project_name", "project_path", "set_default"], + "delete_note": ["identifier", "is_directory", "project"], + "delete_project": ["project_name"], + "edit_note": [ + "identifier", + "operation", + "content", + "project", + "section", + "find_text", + "expected_replacements", + ], + "fetch": ["id"], + "list_directory": ["dir_name", "depth", "file_name_glob", "project"], + "list_memory_projects": [], + "move_note": ["identifier", "destination_path", "is_directory", "project"], + "read_content": ["path", "project"], + "read_note": ["identifier", "project", "page", "page_size"], + "recent_activity": ["type", "depth", "timeframe", "project"], + "search": ["query"], + "search_by_metadata": ["filters", "project", "limit", "offset"], + "search_notes": [ + "query", + "project", + "page", + "page_size", + "search_type", + "types", + "entity_types", + "after_date", + "metadata_filters", + "tags", + "status", + ], + "view_note": ["identifier", "project", "page", "page_size"], + "write_note": ["title", "content", "directory", "project", "tags", "note_type"], +} + + +TOOL_FUNCTIONS: dict[str, object] = { + "build_context": tools.build_context, + "canvas": tools.canvas, + "create_memory_project": 
tools.create_memory_project, + "delete_note": tools.delete_note, + "delete_project": tools.delete_project, + "edit_note": tools.edit_note, + "fetch": tools.fetch, + "list_directory": tools.list_directory, + "list_memory_projects": tools.list_memory_projects, + "move_note": tools.move_note, + "read_content": tools.read_content, + "read_note": tools.read_note, + "recent_activity": tools.recent_activity, + "search": tools.search, + "search_by_metadata": tools.search_by_metadata, + "search_notes": tools.search_notes, + "view_note": tools.view_note, + "write_note": tools.write_note, +} + + +def _signature_params(tool_obj: object) -> list[str]: + fn = tool_obj.fn + params = [] + for param in inspect.signature(fn).parameters.values(): + if param.name == "context": + continue + params.append(param.name) + return params + + +def test_mcp_tool_signatures_are_stable(): + assert set(TOOL_FUNCTIONS.keys()) == set(EXPECTED_TOOL_SIGNATURES.keys()) + + for tool_name, tool_obj in TOOL_FUNCTIONS.items(): + assert _signature_params(tool_obj) == EXPECTED_TOOL_SIGNATURES[tool_name] diff --git a/tests/repository/test_search_repository.py b/tests/repository/test_search_repository.py index 0209ce22..1466b126 100644 --- a/tests/repository/test_search_repository.py +++ b/tests/repository/test_search_repository.py @@ -934,9 +934,7 @@ async def test_search_metadata_filters_numeric_comparisons(search_repository, se {"schema": {"confidence": 0.4}}, ) - results = await search_repository.search( - metadata_filters={"schema.confidence": {"$gt": 0.7}} - ) + results = await search_repository.search(metadata_filters={"schema.confidence": {"$gt": 0.7}}) assert {result.id for result in results} == {entity_high.id} results = await search_repository.search( diff --git a/uv.lock b/uv.lock index 9ff28ceb..2fcbd20a 100644 --- a/uv.lock +++ b/uv.lock @@ -187,6 +187,7 @@ dev = [ { name = "pytest-asyncio" }, { name = "pytest-cov" }, { name = "pytest-mock" }, + { name = "pytest-testmon" }, { name = "pytest-xdist" }, { name = "ruff" }, { name = "testcontainers" }, @@ -242,6 +243,7 @@ dev = [ { name = "pytest-asyncio", specifier = ">=0.24.0" }, { name = "pytest-cov", specifier = ">=4.1.0" }, { name = "pytest-mock", specifier = ">=3.12.0" }, + { name = "pytest-testmon", specifier = ">=2.2.0" }, { name = "pytest-xdist", specifier = ">=3.0.0" }, { name = "ruff", specifier = ">=0.1.6" }, { name = "testcontainers", extras = ["postgres"], specifier = ">=4.0.0" }, @@ -1723,6 +1725,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5a/cc/06253936f4a7fa2e0f48dfe6d851d9c56df896a9ab09ac019d70b760619c/pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d", size = 10095, upload-time = "2025-09-16T16:37:25.734Z" }, ] +[[package]] +name = "pytest-testmon" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4d/1d/3e4230cc67cd6205bbe03c3527500c0ccaf7f0c78b436537eac71590ee4a/pytest_testmon-2.2.0.tar.gz", hash = "sha256:01f488e955ed0e0049777bee598bf1f647dd524e06f544c31a24e68f8d775a51", size = 23108, upload-time = "2025-12-01T07:30:24.76Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/55/ebb3c2f59fb089f08d00f764830d35780fc4e4c41dffcadafa3264682b65/pytest_testmon-2.2.0-py3-none-any.whl", hash = "sha256:2604ca44a54d61a2e830d9ce828b41a837075e4ebc1f81b148add8e90d34815b", size = 25199, upload-time = 
"2025-12-01T07:30:23.623Z" }, +] + [[package]] name = "pytest-xdist" version = "3.8.0"