diff --git a/.changelog/v0.12.x.md b/.changelog/v0.12.48.md similarity index 99% rename from .changelog/v0.12.x.md rename to .changelog/v0.12.48.md index b9e27788..0a2e9bff 100644 --- a/.changelog/v0.12.x.md +++ b/.changelog/v0.12.48.md @@ -1,4 +1,4 @@ -# Release v0.12.x - Changelog +# Release v0.12.48 - Changelog Released: YYYY-MM-DD diff --git a/.changelog/v0.13.x.md b/.changelog/v0.13.x.md new file mode 100644 index 00000000..7faf69a2 --- /dev/null +++ b/.changelog/v0.13.x.md @@ -0,0 +1,255 @@ +# Release v0.13.x - Changelog + +Released: YYYY-MM-DD + +## Overview + +This release expands the genome dashboard with comprehensive cancer predisposition markers and adds user feedback collection for completed agent tasks. + +## Features + +### Agent Task Feedback System +- **Thumbs up/down feedback buttons** on completed agent cards — users can rate task outcomes as positive, negative, or neutral +- **Optional comment input** for more detailed feedback with neutral rating option +- **Feedback statistics API** (`GET /api/cos/feedback/stats`) aggregating satisfaction rate, breakdown by task type, and recent comments +- **Real-time visual feedback** confirmation with toast notifications and button state changes +- **Proactive feedback toast notifications** — when agents complete tasks, a toast notification appears with inline thumbs up/down buttons, prompting for immediate feedback without requiring navigation to the Agents tab +- Persisted in agent state for historical analysis and learning improvements +- Addresses COS-GOALS.md requirement: "User satisfaction feedback" metric tracking and "Prompt user for feedback on completed tasks" + +### Digital Twin — Cancer Predisposition Marker Expansion +- **37 new cancer predisposition SNP markers** added across 7 new cancer subcategories plus expanded tumor suppression and thyroid categories +- **Breast & Ovarian Cancer** (12 markers): FGFR2, TOX3, MAP3K1, 2q35, LSP1, 5p12/FGF10, SLC4A7, CASP8 (protective), 3 BRCA Ashkenazi founder mutations (FDA-approved), BNC2 (ovarian) +- **Prostate Cancer** (6 markers): Three independent 8q24 region signals, MSMB, HNF1B, 17q24 +- **Colorectal Cancer** (4 markers): SMAD7 (protective), EIF3H, 11q23, GREM1 +- **Lung Cancer** (3 markers): CHRNA3, CHRNA5, CHRNA3/5 region — nicotinic receptor variants affecting smoking intensity and lung cancer risk +- **Melanoma** (2 markers): MC1R R160W, TYRP1 +- **Bladder Cancer** (3 markers): NAT2 slow acetylator tag, 8q24, TP63 +- **Digestive Cancer** (3 markers): ABO blood group (pancreatic), NR5A2 (pancreatic protective), MUC1 (gastric) +- **Tumor Suppression** expanded (3 markers): TERT multi-cancer, MDM2 p53 attenuator, CHEK2 DNA damage response +- **Thyroid** expanded (2 markers): NKX2-1, DIRC3 papillary thyroid cancer +- **Reorganized cancer categories**: Replaced single `cancer_risk` with specific cancer type subcategories for clearer organization; moved KRAS to tumor suppression (multi-cancer) + +### Moltworld Platform Support +- **New platform: Moltworld** — a shared voxel world (480x480 grid) where AI agents move, build structures, think out loud, communicate, and earn SIM tokens +- **Agent registration** via Moltworld API with automatic `agentId`-based auth (no claim step needed, unlike Moltbook) +- **World actions**: Join/move (heartbeat), explore (random/targeted movement), build (place/remove blocks), think (visible thoughts), say (messaging) +- **SIM token economy**: 0.1 SIM/hour while online; balance tracking via dedicated endpoint +- **Rate limiting**: Per-action 
cooldowns (join: 5s, build: 1s, think: 5s) plus global 60 req/min sliding window +- **Scheduled automation**: 5 new action types — `mw_heartbeat`, `explore`, `build`, `say`, `interact` — with platform-aware schedule form filtering +- **Platform-aware UI**: Quick actions switch between Moltbook (Engage, Check Posts) and Moltworld (Explore, Build) based on selected account; schedule action dropdown filters by platform +- **Connection testing**: Profile + balance check for Moltworld accounts in the Test Connection flow + +### Moltworld "World" Tab +- **Dedicated World tab** on agent detail page — visible only for agents with Moltworld accounts, hidden for Moltbook-only agents +- **World Status card** showing online/offline state, position, SIM balance, earning rate, total earned, and online time with refresh button +- **Nearby Agents panel** populated from join/explore responses with agent names, positions, and distances +- **Recent Messages panel** showing messages and thoughts from nearby agents (expire after 5 min per API) +- **Move/Explore actions** with X/Y coordinate inputs and optional thinking text, plus "Random Explore" for random movement +- **Think action** for sending visible thoughts to nearby agents +- **Build action** with X/Y/Z inputs, block type selection (wood/stone/dirt/grass/leaves), and place/remove toggle +- **Say action** for broadcasting messages or sending DMs to specific agent IDs +- **Rate limit badges** with animated cooldown timers matching ToolsTab pattern +- **Deep-linkable** at `/agents/:id/world` +- New server endpoints: `POST /think` (direct thought API) and `POST /say` (message via join) + +### Moltworld Explorer Script +- **Standalone exploration script** (`server/scripts/moltworld-explore.mjs`) — wanders the world, thinks AI-generated thoughts, greets nearby agents, and earns SIM tokens +- **LM Studio integration** — generates contextual thoughts using local `gpt-oss-20b` model (configurable via `LMSTUDIO_MODEL`), with automatic fallback to curated thoughts when unavailable +- **Configurable intervals** — join frequency (default 3-9 min random), model, LM Studio URL, and duration all configurable via env vars +- **COS autonomous job** (`job-moltworld-exploration`) — daily scheduled job to run the explorer script +- **Efficient heartbeat** — joins every 3-9 minutes (within 10-minute expiry window) instead of every 8 seconds, reducing API calls while staying online + +### Moltworld Sky Maze Builder +- **Procedural maze builder** (`server/scripts/moltworld-maze.mjs`) — generates and builds a stone maze floating in the sky using recursive backtracking DFS +- **Configurable** — maze size (3-12 cells), position, height (Z), wall height, block type, optional seed for reproducible mazes +- **Rate-limit compliant** — 1.1s between builds, auto-heartbeat every 50 blocks, handles 429 responses with retry +- **Resumable** — progress saved to `data/moltworld-maze-progress.json` after every block; interrupted builds resume from where they left off +- **Progress thoughts** — agent thinks out loud while building ("Placing block 150/449... 
the maze takes shape") + +### Platform Tab Improvements +- **Renamed "Tools" tab to "Moltbook"** (📚 icon) for clarity — tab only visible for agents with Moltbook accounts +- **Conditional platform tabs** — Moltbook and World tabs auto-hide when agent lacks the corresponding platform account + +### Moltworld Real-time WebSocket Integration +- **Server-side WebSocket relay** (`server/services/moltworldWs.js`) — connects to `wss://moltworld.io/ws` and streams real-time events (agent movements, thoughts, messages, interactions, presence snapshots) through the existing Socket.IO infrastructure +- **Connection control routes** at `/api/agents/tools/moltworld/ws/` — connect, disconnect, status, plus WebSocket-based move, think, nearby, and interact endpoints for lower-latency actions +- **Live Feed panel** in World tab — scrollable real-time event stream with per-event icons, agent names, event type badges, timestamps, and "(live)" indicator when connected +- **Connection banner** with status dot (green=connected, yellow=connecting, gray=disconnected) and Connect/Disconnect button +- **WebSocket-based actions** — when WS is connected, Move and Think actions route through WebSocket for lower latency; Say DMs use the interact endpoint +- **Real-time Nearby Agents** — presence data from WebSocket automatically merges with REST-fetched nearby agents, with "(live)" indicator +- **Explorer script WS mode** — `MOLTWORLD_USE_WS=true` env var routes movements through the PortOS WS relay when available, with automatic REST fallback +- **Automatic reconnection** with exponential backoff (2s to 60s cap); no auto-connect on server restart (requires explicit Connect click) +- **Activity logging** — interaction events from WebSocket are logged to the agent activity service for the Activity tab + +### Moltworld Activity History +- **Persistent activity log** in the World tab — all Moltworld actions (explore, think, say, build, heartbeat) are now logged to the agent activity service and displayed in a scrollable "Activity History" card +- **Filterable by action type** — dropdown to filter by Explore, Think, Say, Build, Heartbeat, or show All +- **Paginated** with "Load More" button for browsing older activity +- **Auto-refresh** after each manual action so new entries appear immediately +- **Consistent `mw_` prefix** for all Moltworld action types (`mw_explore`, `mw_build`, `mw_think`, `mw_say`, `mw_heartbeat`, `mw_interact`) — distinguishes them from Moltbook actions in the global activity view +- Activity logging added to think, say, and join routes (previously only explore and build were logged); WebSocket-initiated actions (move, think, interact) also log activity + +### Moltworld Action Queue +- **In-memory action queue** per agent — schedule future actions (explore, think, build, say) from the UI that the explore script picks up and executes +- **Action Queue card** at top of the right column in World tab with real-time status updates via Socket.IO +- **Add Action form** — inline toggleable form with action type dropdown and dynamic parameter fields (coordinates, thought text, message, block type, etc.) 
+- **Cancel pending items** — click X on any pending queue item to remove it +- **Queue status badges** — pending (gray), executing (blue pulse), completed (green), failed (red) +- **Explore script integration** — before each auto-generated action, the script checks the queue for manually-scheduled items and executes them first, respecting rate limits +- **Queue API endpoints**: `GET /queue/:agentId`, `POST /queue`, `DELETE /queue/:id`, `POST /queue/:id/complete`, `POST /queue/:id/fail` + +### Dashboard — Upcoming Tasks Preview Widget +- **New UpcomingTasksWidget** on the main dashboard — shows what tasks the CoS will work on next, providing visibility into the autonomous schedule +- **Ready vs scheduled distinction** — tasks ready to run now highlighted in green, scheduled tasks show countdown timer until eligible +- **Learning-adjusted indicators** — tasks with performance-adjusted intervals show trend arrows (up for high-success fast-tracked tasks, down for struggling tasks with extended cooldowns) +- **Task metadata** — each row shows task icon, description, interval type (daily/weekly/rotation), last run time, and success rate when available +- **New API endpoint** `GET /api/cos/upcoming` returns upcoming tasks sorted by eligibility time with timing info +- **Service function `getUpcomingTasks()`** in taskSchedule.js calculates eligibility, formats timing, and respects learning-based interval adjustments +- Addresses user preference for "quick visual summaries" and helps understand CoS behavior + +### Custom PM2_HOME Support for Isolated App Instances +- **Per-app PM2_HOME configuration** — apps can now specify a custom `pm2Home` path to manage processes in a separate PM2 daemon instance, useful for apps that ship with their own PM2 setup (e.g., apps with embedded PM2 ecosystems) +- **Smart process grouping** — the apps list route now groups apps by their PM2_HOME and queries each PM2 instance separately, so process statuses are correctly fetched from the right daemon +- **All PM2 operations support custom home** — start, stop, restart, delete, status, and logs all pass the custom PM2_HOME via environment variable when specified +- **Backwards compatible** — apps without `pm2Home` continue using the default shared PM2 daemon + +### Digital Twin — Aesthetic Taste Prompting Spec (brain idea 608dc733) +- **Promoted brain idea 608dc733** ("Prompting Aesthetic Taste Docs via Digital Twin") from dormant idea to concrete P2.5 spec in PLAN.md under M42: Unified Digital Twin Identity System +- **Defined 7 aesthetic domains** for taste capture: movies, music, visual art, architecture, food, plus 2 new domains (fashion/texture and digital/interface aesthetics) +- **Designed conversational prompting flow** — twin reads existing identity documents (BOOKS.md, AUDIO.md, CREATIVE.md, PREFERENCES.md) and enrichment answers to generate personalized follow-up questions that reference things it already knows about the user +- **Specified data model** — taste-profile.json v2.0.0 schema (extended with source tracking, generated question storage, identity context provenance) and aesthetics.json as the canonical synthesized profile +- **Relaxed P1 prerequisite** — P2.5 can read identity documents directly without the Identity orchestrator, unblocking implementation +- **9-step implementation plan** covering new sections, context aggregation, LLM-powered question generation, API routes, UI updates, and data migration + +### Load Sample Providers from AI Toolkit +- **"Load Samples" button** on the AI Providers 
page — discovers new sample providers from `portos-ai-toolkit` that aren't yet in your configuration +- **Per-provider "Add" button** with name, type badge, command/endpoint, models, and env vars displayed for each sample +- **"Add All" bulk action** when multiple samples are available +- **Toolkit `getSampleProviders()` API** — reads toolkit defaults and overlays PortOS-specific samples (e.g., `claude-code-bedrock`), filtering out already-configured providers +- **`claude-code-bedrock` sample provider** — pre-configured Bedrock provider with `CLAUDE_CODE_USE_BEDROCK=1` env var, disabled by default +- Updated `portos-ai-toolkit` to 0.5.0 + +### Dependency Management +- **Switched `portos-ai-toolkit` from git to npm** — dependency now resolves from npm registry (`^0.5.0`) instead of a GitHub git tag, ensuring consistent version resolution and pulling latest toolkit updates (0.4.0 → 0.5.0) + +## Improvements + +### Brain Scheduler — Failure Cooldown +- **30-minute retry cooldown** after failed daily digest or weekly review tasks — prevents retry spam by tracking last failure time and skipping missed-task checks during the cooldown period +- Failed catch-up attempts now log "(retry in 30min)" for visibility +- Eliminates excessive retry attempts that would otherwise occur every minute after a failure + +### CoS — Consolidated Duplicate Repo Maintenance Jobs +- **Merged `job-git-maintenance` into `job-github-repo-maintenance`** — the two jobs had overlapping scope (local git repo checks vs GitHub API audits) and are now a single unified job covering both local and remote repository maintenance +- Removed `git-maintenance` skill template; consolidated local repo checks (uncommitted changes, stale branches) into the `github-repo-maintenance` skill template +- **Enabled `job-github-repo-maintenance`** — activated the unified GitHub repo maintenance job for weekly automated audits covering security alerts, stale dependencies, missing CI/README/license, uncommitted local changes, and stale branches +- Cleaned up stale `job-git-maintenance` data entry left behind after the merge + +### CoS Health Check — Auto-restart Errored PM2 Processes +- **Automated recovery for errored PM2 processes** — the health check now auto-restarts any process in `errored` state instead of just reporting it as an issue +- Attempts `pm2 restart ` for each errored process and logs success/failure per process +- Failed restarts surface as `error` issues; successful restarts surface as `warning` issues so the UI reflects what happened +- Eliminates manual intervention for transient crashes (e.g., bituniverse) that PM2's built-in `max_restarts` has already exhausted + +### PM2 Standardizer Reliability +- **Fixed timeout race condition** in `executeCliAnalysis` — the promise could be rejected twice (once by timeout, once by the killed child's close event); now uses a `settled` guard and clears the timer on completion +- **Increased default timeout** from 120s to 180s for both CLI and API providers — CLI LLM analysis of projects without existing ecosystem configs often exceeds 2 minutes +- **Added API fetch timeout** — `executeApiAnalysis` now uses `AbortController` with the same provider timeout, preventing indefinite hangs on unresponsive API endpoints +- **Added timing diagnostics** — `analyzeApp` now logs the provider name, type, and elapsed time for each analysis, making slow provider issues easier to diagnose + +### CoS Dashboard Activity Calendar +- **GitHub-style activity heatmap** — the CosDashboardWidget now displays a 
compact contribution-style calendar showing the last 8 weeks of CoS task completion +- **Color-coded intensity** — squares colored by volume (more tasks = brighter) and success rate (green for 80%+, amber for 50-79%, red for <50%) +- **Streak indicator** — shows current daily streak with flame icon badge +- **Interactive tooltips** — hover on any day to see date, task count, and success rate +- **Today highlight** — current day marked with accent ring +- **Summary stats** — total tasks, active days, and overall success rate displayed below the calendar +- **Mini legend** — visual guide showing Less-to-More color scale +- **New API endpoint** — `GET /api/cos/productivity/calendar?weeks=N` returns calendar-optimized daily activity data with intensity calculations +- Directly addresses user preference for "quick visual summaries of recent task completion directly on the dashboard" + +### Goal Progress Dashboard Widget +- **Visual goal progress tracking** — new dashboard widget maps completed CoS tasks to goal categories defined in COS-GOALS.md +- **5 goal categories** tracked: Codebase Quality, Self-Improvement, Documentation, User Engagement, and System Health — each with icon, color, and task type mappings +- **Engagement levels** — progress bars show relative task volume per goal (low/medium/high based on completed tasks) +- **Success rate indicators** — each goal shows its success percentage with color-coded status (green >80%, amber 50-80%, red <50%) +- **Insights row** — highlights most active and least active (needs attention) goal areas +- **Parses COS-GOALS.md** — dynamically reads Active Goals section to extract goal names and items +- **Task-to-goal mapping** — analyzes task types and keywords from learning data to attribute tasks to appropriate goals +- **New API endpoints** — `GET /api/cos/goal-progress` (full data) and `GET /api/cos/goal-progress/summary` (compact for dashboard) +- Addresses COS-GOALS.md "Metrics to Track" requirement and user preference for key performance metrics displayed prominently + +### Brain Ideas — Status Tracking & Done State +- **Added `status` field to brain ideas** (`active`/`done`) — ideas can now be marked as done/ingested once they've been promoted to PLAN.md or implemented, preventing them from generating duplicate auto-tasks +- **Status filter dropdown** for ideas in the Memory tab — filter ideas by active or done status, matching the existing pattern for projects and admin items +- **Quick "mark done" button** (checkmark icon) on project, idea, and admin record cards — one-click status update without opening the edit form +- **Idea status badges** with color coding (yellow=active, gray=done) in the record list view +- **Updated autonomous job prompts** — project-review job now queries `?status=active` to skip done projects; brain-processing job skips done ideas when creating CoS tasks +- **Marked genome-related brain items as done** — Genome Section Integration project (0e6a0332), Aesthetic Taste idea (608dc733), and Chronotype Trait idea (284dd487) all marked done with notes indicating ingestion into PLAN.md M42 +- Prevents the duplicate task generation loop where done brain items kept generating new CoS tasks for already-spec'd work + +### Genome Section UI — Upload Flow, Category Navigation & Status Dashboard +- **Step-by-step 23andMe upload instructions** — guided flow showing exactly how to download raw data from 23andMe with numbered steps +- **Post-upload pipeline overview** — 3-step visual showing Upload → Scan → Explore to set 
expectations for new users +- **Category overview dashboard** — visual grid of all marker categories with proportional status bars (beneficial/typical/concern/major), clickable to jump to that category +- **Status legend** — color key explaining the meaning of each status in the overview grid +- **Sticky category quick-nav bar** — horizontal scrollable bar that sticks to the top when scrolling, with emoji-labeled buttons for each category +- **Scroll-to-category** — clicking any category in the overview grid or quick-nav bar smooth-scrolls to that category's marker cards +- Unblocks downstream Chronotype, Aesthetic Taste, and Goal Tracker implementations by establishing the genome section as a navigable, category-organized reference + +### Braille Spinner Loading Indicators +- **New `` component** — compact, monospace-friendly animated spinner using braille unicode frames (`⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏` at 80ms) that fits PortOS's dark terminal aesthetic +- **Replaced plain "Loading..." text** across 16 loading states — Dashboard, Apps, Uploads, Chief of Staff, Prompt Manager, Agent Detail, Agent List, all agent tabs (Overview, Tools, World, Published, Schedules, Activity), DevTools (History, Runs, Processes, Git, Usage), and the lazy-load fallback in App.jsx +- Zero new dependencies — the braille frame data is inlined directly (~100 bytes), inspired by the `unicode-animations` npm package concept without the supply-chain risk + +### CyberCity UX Improvements +- **Improved building status colors** — stopped buildings now use red (#ef4444) with a faster pulse for urgency instead of amber; not-started uses violet (#8b5cf6) for better distinction from online; archived uses lighter slate (#64748b) for visibility against the dark background +- **Enhanced HUD readability** — increased font sizes across all HUD panels, stronger background opacity (85%) and border contrast, larger touch targets (40px settings button, 32px agent bar items) +- **Collapsible activity log** — click header to collapse/expand; entries now show colored level indicators (dot per severity), hover-to-expand truncated messages, improved scrollbar styling +- **Settings panel polish** — section descriptions explain each group's purpose, tooltip descriptions on every toggle and slider, sticky header when scrolling, visual section dividers, corrected slider gradient for min-offset ranges +- **Building click navigation** — clicking a building now navigates to that specific app's page (`/apps/:id`) instead of the generic apps list +- **Holographic building labels** — show process count, status icons, and "CLICK TO VIEW" hint; stopped buildings use red border for consistency +- **Status legend** — new HUD panel explaining building color meanings (online/stopped/not started/archived) +- **Improved loading screen** — larger title with glow, animated progress bar, and loading subtitle +- **Ground improvements** — larger grid cells (2x) with brighter section lines, enhanced fog density for depth +- **Agent bar improvements** — larger touch targets with glow on status dots, better scrollbar styling, tooltip with agent state details + +## Fixes + +### GitHub Repo Maintenance Skill Template — Improved Reliability +- **Fixed Dependabot alert detection** — the skill template now specifies the correct `dependabot/alerts` API endpoint with open-state filtering, replacing the non-functional `vulnerability-alerts` endpoint that caused the Feb 15 first run to report 0 security alerts (actual: 196 across 6 repos) +- **Increased repo list limit** from 100 to 200 — 
the account has 100+ repos; lower limits caused the Feb 16 run to only audit 40 of 97 non-archived repos +- **Added open PR auditing** (step 5) — checks for accumulated bot PRs (Dependabot, Snyk) and human PRs needing attention; the Feb 16 run found 45 open PRs across 6 repos that were invisible to the original template +- **Added result verification** — if all repos return 0 security alerts, the template now instructs the agent to verify against known-alert repos before reporting "all clear" +- **Added duplicate task prevention** — template now instructs agents to check existing CoS tasks before creating new ones to avoid duplication across weekly runs + +### Agent Resume — System Task Support +- **Resume button now available for system agent tasks** — previously the Resume button was hidden for system (CoS-internal) agents; now both user and system agents show the Resume button on completed agent cards +- **Task type preservation** — resumed system tasks correctly create new `internal` type tasks (written to `COS-TASKS.md`) instead of defaulting to user tasks; ensures system task workflows maintain their proper task type +- **Visual indicator** — the Resume modal shows "Resume System Agent Task" title when resuming a system agent, clarifying what type of task will be created + +### Dashboard — Suppress "Not Found" Toast Errors for Optional Widgets +- **Fixed multiple "Not found" toast errors** appearing when navigating to the dashboard — the CosDashboardWidget and GoalProgressWidget fire API calls for supplementary data (activity calendar, learning summary, goal progress, etc.) that correctly handle missing data via `.catch(() => null)`, but the centralized `api.request()` function was showing error toasts before the error was thrown and caught +- **Added `silent` option to `api.request()`** — callers can pass `{ silent: true }` to suppress automatic toast notifications for non-critical requests, while still throwing errors for programmatic handling +- Updated CosDashboardWidget (4 API calls) and GoalProgressWidget (1 API call) to use `{ silent: true }` since these widgets gracefully degrade when data is unavailable + +### Autofixer — PATH Inheritance for nvm Environments +- **Fixed `pm2 jlist` failing with "env: node: No such file or directory"** — the autofixer process spawns child shells via `exec()` that didn't inherit the nvm PATH; added `PATH: process.env.PATH` to the autofixer's PM2 env config so child processes can find node/pm2 + +### PM2 Scripts — Removed Project-Local PM2_HOME Isolation +- **Simplified PM2 npm scripts** — removed `PM2_HOME=.pm2` and local binary path from all pm2:* scripts; now uses global pm2 installation directly +- **Native pm2 commands work** — `pm2 logs`, `pm2 status`, `pm2 monit` etc. 
now work without wrappers since PortOS shares the global `~/.pm2` daemon +- **Cleaned up stale local .pm2 directory** — the project-local PM2 daemon was causing "invalid pid" errors when state got out of sync with the global daemon + +### CoS Sub-Nav Overflow Scroll Arrows +- **Horizontal scroll arrows for CoS tab navigation** — the 12-tab sub-nav now shows left/right chevron buttons with gradient fades when tabs overflow, allowing navigation to all tabs (Briefing through Config) without collapsing the side panel +- **Scroll detection** — arrows appear only when there's content in that direction; smooth scroll animation on click +- **Desktop-friendly** — previously users had no visible way to scroll on desktop; now scroll arrows provide clear navigation affordance + +### Custom PM2_HOME Auto-Detection +- **Automatic pm2Home extraction during app detection/ingestion** — the streaming detector now parses ecosystem.config.js for `const PM2_HOME = ...` declarations and extracts the custom path +- **Template literal support** — handles common patterns like `` `${require("os").homedir()}/.pm2-grace` `` by resolving the homedir at detection time using `[^`]+` regex pattern that properly captures nested quotes +- **Refresh config updates pm2Home** — the `/api/apps/:id/refresh-config` endpoint now auto-populates pm2Home when re-parsing the ecosystem config, so existing apps can pick up custom PM2_HOME settings without re-ingestion +- **Detection uses correct PM2_HOME for status checks** — when detecting a new app with a custom PM2_HOME, the PM2 status check runs against the correct daemon instance instead of the default +- **Fixed template literal parsing** — the regex now correctly handles nested quotes in template literals (e.g., `require("os")`) instead of stopping at the first quote character diff --git a/.gitignore b/.gitignore index b389bbc1..61c66341 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,4 @@ Thumbs.db # Browser service node_modules (code is committed) browser/node_modules/ +.pm2 diff --git a/CLAUDE.md b/CLAUDE.md index 439cecaa..24871c96 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -15,10 +15,10 @@ npm run dev cd server && npm test cd server && npm run test:watch # Watch mode -# Production -npm run pm2:start -npm run pm2:stop -npm run pm2:logs +# Production (npm scripts or pm2 directly) +pm2 start ecosystem.config.cjs +pm2 stop ecosystem.config.cjs +pm2 logs ``` ## Architecture diff --git a/PLAN.md b/PLAN.md index 9d2f4f43..95534297 100644 --- a/PLAN.md +++ b/PLAN.md @@ -67,13 +67,14 @@ pm2 logs - [x] **M38**: Agent Tools - AI content generation, feed browsing, and autonomous engagement for Moltbook agents - [x] **M39**: Agent-Centric Drill-Down - Redesigned Agents section with agent-first hierarchy, deep-linkable URLs, and scoped sub-tabs - [x] **M41**: CyberCity Immersive Overhaul - Procedural synthwave audio, enhanced post-processing (chromatic aberration, film grain, color grading), reflective wet-street ground, settings system, and atmosphere enhancements +- [x] **M43**: Moltworld Platform Support - Second platform integration for AI agents in a shared voxel world with movement, building, thinking, messaging, and SIM token economy ### Planned - [ ] **M7**: App Templates - Template management and app scaffolding from templates - [ ] **M34 P3,P5-P7**: Digital Twin - Behavioral feedback loop, multi-modal capture, advanced testing, personas - [ ] **M40**: Agent Skill System - Task-type-specific prompt templates with routing logic, negative examples, and embedded workflows for 
improved agent accuracy and reliability -- [ ] **M42**: Unified Digital Twin Identity System - Connect Genome, Chronotype, Aesthetic Taste, and Mortality-Aware Goals into a single coherent Identity architecture +- [ ] **M42**: Unified Digital Twin Identity System - Connect Genome (117 markers, 32 categories), Chronotype (5 sleep markers + behavioral), Aesthetic Taste (P2 complete, P2.5 adds twin-aware prompting), and Mortality-Aware Goals into a single coherent Identity architecture with cross-insights engine --- @@ -85,9 +86,9 @@ Four separate workstreams converge on the same vision: a personal digital twin t | Subsystem | Current State | Location | |-----------|--------------|----------| -| **Genome** | Fully implemented: 23andMe upload, 37+ curated SNP markers across 13 categories, ClinVar integration | `server/services/genome.js`, `GenomeTab.jsx`, `data/digital-twin/genome.json` | -| **Chronotype** | Partially exists: 2 sleep markers (CLOCK rs1801260, DEC2 rs57875989) in genome + `daily_routines` enrichment category | `curatedGenomeMarkers.js` sleep category, `ENRICHMENT_CATEGORIES.daily_routines` | -| **Aesthetic Taste** | Partially exists: `aesthetics` enrichment category + book/movie/music list-based enrichments | `ENRICHMENT_CATEGORIES.aesthetics`, `BOOKS.md`, `MOVIES.md`, `AUDIO.md` | +| **Genome** | Fully implemented: 23andMe upload, 117 curated SNP markers across 32 categories, ClinVar integration, epigenetic tracking | `server/services/genome.js`, `GenomeTab.jsx`, `data/digital-twin/genome.json` | +| **Chronotype** | Genetic data ready: 5 sleep/circadian markers (CLOCK rs1801260, DEC2 rs57875989, PER2 rs35333999, CRY1 rs2287161, MTNR1B rs10830963) + `daily_routines` enrichment category. Derivation service not yet built | `curatedGenomeMarkers.js` sleep category, `ENRICHMENT_CATEGORIES.daily_routines` | +| **Aesthetic Taste** | P2 complete: Taste questionnaire with 5 sections (movies, music, visual_art, architecture, food), conversational Q&A, AI summary generation. Enrichment categories also feed taste data from book/movie/music lists | `TasteTab.jsx`, `taste-questionnaire.js`, `data/digital-twin/taste-profile.json` | | **Goal Tracking** | Partially exists: `COS-GOALS.md` for CoS missions, `TASKS.md` for user tasks, `EXISTENTIAL.md` soul doc | `data/COS-GOALS.md`, `data/TASKS.md`, `data/digital-twin/EXISTENTIAL.md` | These should be unified under a single **Identity** architecture so the twin can reason across all dimensions (e.g., "your CLOCK gene says evening chronotype — schedule deep work after 8pm" or "given your longevity markers and age, here's how to prioritize your 10-year goals"). @@ -102,7 +103,7 @@ These should be unified under a single **Identity** architecture so the twin can "createdAt": "2026-02-12T00:00:00.000Z", "updatedAt": "2026-02-12T00:00:00.000Z", "sections": { - "genome": { "status": "active", "dataFile": "genome.json", "markerCount": 37, "lastScanAt": "..." }, + "genome": { "status": "active", "dataFile": "genome.json", "markerCount": 117, "categoryCount": 32, "lastScanAt": "..." 
}, "chronotype": { "status": "active", "dataFile": "chronotype.json", "derivedFrom": ["genome:sleep", "enrichment:daily_routines"] }, "aesthetics": { "status": "active", "dataFile": "aesthetics.json", "derivedFrom": ["enrichment:aesthetics", "enrichment:favorite_books", "enrichment:favorite_movies", "enrichment:music_taste"] }, "goals": { "status": "active", "dataFile": "goals.json" } @@ -122,7 +123,10 @@ Derived from genome sleep markers + daily_routines enrichment answers + user ove "sources": { "genetic": { "clockGene": { "rsid": "rs1801260", "genotype": "T/C", "signal": "mild_evening" }, - "dec2": { "rsid": "rs57875989", "genotype": "G/G", "signal": "standard_sleep_need" } + "dec2": { "rsid": "rs57875989", "genotype": "G/G", "signal": "standard_sleep_need" }, + "per2": { "rsid": "rs35333999", "genotype": "C/C", "signal": "standard_circadian" }, + "cry1": { "rsid": "rs2287161", "genotype": "C/C", "signal": "standard_period" }, + "mtnr1b": { "rsid": "rs10830963", "genotype": "T/T", "signal": "normal_melatonin_receptor" } }, "behavioral": { "preferredWakeTime": "08:30", @@ -141,7 +145,7 @@ Derived from genome sleep markers + daily_routines enrichment answers + user ove } ``` -**Derivation logic**: Genome sleep markers provide the genetic baseline. The `daily_routines` enrichment answers provide behavioral confirmation. When genetic and behavioral signals agree, confidence is high. When they disagree, surface the conflict for user review. Caffeine cutoff cross-references caffeine metabolism markers (CYP1A2 rs762551, ADA rs73598374). +**Derivation logic**: Five genome sleep markers provide the genetic baseline: CLOCK (evening preference), DEC2 (sleep duration need), PER2 (circadian period), CRY1 (delayed sleep phase), MTNR1B (melatonin receptor / nighttime glucose). The `daily_routines` enrichment answers provide behavioral confirmation. When genetic and behavioral signals agree, confidence is high. When they disagree, surface the conflict for user review. Caffeine cutoff cross-references caffeine metabolism markers (CYP1A2 rs762551, ADA rs73598374). MTNR1B status also informs late-eating recommendations. #### Entity: Aesthetic Taste Profile (`aesthetics.json`) @@ -259,8 +263,9 @@ Consolidates scattered aesthetic data into a structured profile. 
``` **Cross-cutting links** (stored in `identity.json.crossLinks`): -- `genome:sleep` → `chronotype:genetic` (CLOCK/DEC2 markers feed chronotype) +- `genome:sleep` → `chronotype:genetic` (CLOCK/DEC2/PER2/CRY1/MTNR1B markers feed chronotype) - `genome:caffeine` → `chronotype:recommendations.caffeineCutoff` (CYP1A2/ADA markers set cutoff) +- `genome:sleep:mtnr1b` → `chronotype:recommendations.lateEatingCutoff` (MTNR1B impairs nighttime glucose) - `genome:longevity` + `genome:cardiovascular` → `goals:lifeExpectancyEstimate` (risk-adjusted lifespan) - `enrichment:daily_routines` → `chronotype:behavioral` (self-reported schedule) - `enrichment:aesthetics` + `enrichment:favorite_*` + `enrichment:music_taste` → `aesthetics:profile` (taste extraction) @@ -284,22 +289,23 @@ The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** t │ └───────────────────────────────────────────────────────┘ │ │ │ │ ┌─ Genome Summary Card ─────────────────────────────────┐ │ -│ │ 37 markers scanned across 13 categories │ │ -│ │ Key findings: 3 beneficial, 2 concern, 1 major │ │ +│ │ 117 markers scanned across 32 categories │ │ +│ │ Key findings: ~20 beneficial, ~40 concern, ~5 major │ │ │ │ [View Full Genome →] │ │ │ └───────────────────────────────────────────────────────┘ │ │ │ │ ┌─ Chronotype Card ─────────────────────────────────────┐ │ -│ │ Type: Evening Owl (75% confidence) │ │ -│ │ Genetic: CLOCK T/C (mild evening) + DEC2 G/G │ │ +│ │ Type: Evening Owl (75% confidence from 5 markers) │ │ +│ │ Genetic: CLOCK T/C + CRY1 C/C + PER2 C/C + DEC2 G/G│ │ │ │ Peak focus: 8pm-2am | Caffeine cutoff: 2pm │ │ +│ │ Late eating cutoff: 8pm (MTNR1B-informed) │ │ │ │ [Configure Schedule →] │ │ │ └───────────────────────────────────────────────────────┘ │ │ │ │ ┌─ Aesthetic Taste Card ────────────────────────────────┐ │ -│ │ Status: Needs questionnaire (0/7 sections) │ │ +│ │ Taste Tab: 0/5 sections completed (P2 UI ready) │ │ │ │ Detected themes from media: brutalist, atmospheric │ │ -│ │ [Start Taste Questionnaire →] │ │ +│ │ [Continue Taste Questionnaire →] [Go to Taste Tab →] │ │ │ └───────────────────────────────────────────────────────┘ │ │ │ │ ┌─ Life Goals Card ─────────────────────────────────────┐ │ @@ -330,11 +336,12 @@ The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** t #### P1: Identity Orchestrator & Chronotype (data layer) - Create `data/digital-twin/identity.json` with section status tracking -- Create `server/services/identity.js` — orchestrator that reads from genome, enrichment, and new data files -- Create `data/digital-twin/chronotype.json` — derive from genome sleep markers + daily_routines enrichment +- Create `server/services/identity.js` — orchestrator that reads from genome, enrichment, taste-profile, and new data files +- Create `data/digital-twin/chronotype.json` — derive from 5 genome sleep markers + daily_routines enrichment - Add `GET /api/digital-twin/identity` route returning unified section status - Add `GET/PUT /api/digital-twin/identity/chronotype` routes -- Derivation function: `deriveChronotypeFromGenome(genomeSummary)` extracts CLOCK + DEC2 status → chronotype signal +- Derivation function: `deriveChronotypeFromGenome(genomeSummary)` extracts all 5 sleep markers (CLOCK, DEC2, PER2, CRY1, MTNR1B) → composite chronotype signal with weighted confidence +- Cross-reference CYP1A2/ADA caffeine markers and MTNR1B melatonin receptor for caffeine cutoff and late-eating recommendations #### P2: Aesthetic Taste Questionnaire ✅ - Created 
`data/digital-twin/taste-profile.json` for structured taste preference storage @@ -348,7 +355,7 @@ The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** t - Create `data/digital-twin/goals.json` - Add `GET/POST/PUT/DELETE /api/digital-twin/identity/goals` routes - Birth date input + SSA actuarial table lookup -- Genome-adjusted life expectancy: weight longevity markers (FOXO3A, IGF1R, CETP) and cardiovascular risk markers (Factor V, 9p21, Lp(a)) into adjustment factor +- Genome-adjusted life expectancy: weight longevity markers (5 markers: FOXO3A, IGF1R, CETP, IPMK, TP53) and cardiovascular risk markers (5 markers: Factor V, 9p21, Lp(a), LPA aspirin, PCSK9) into adjustment factor - Time-horizon calculation: years remaining, healthy years, percent complete - Urgency scoring: `urgency = (goalHorizonYears - yearsRemaining) / goalHorizonYears` normalized - Goal CRUD with category tagging and milestone tracking @@ -361,31 +368,288 @@ The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** t - Create `GoalTracker.jsx` — goal list with urgency heatmap and timeline view - Wire sub-routes for deep dives +#### P2.5: Digital Twin Aesthetic Taste Prompting (brain idea 608dc733) + +##### Problem + +P2's Taste questionnaire uses static questions and keyword-triggered follow-ups. The questions are good but generic — they don't reference anything the twin already knows about the user. Brain idea 608dc733 proposes using the digital twin's existing knowledge (books, music, movie lists, enrichment answers, personality traits) to generate personalized, conversational prompts that feel like talking to someone who already knows you rather than filling out a survey. + +##### What Data to Capture + +The aesthetic taste system captures preferences across **7 domains**, extending P2's 5 sections with 2 new ones (fashion/texture and digital/interface): + +| Domain | Data Captured | Sources That Seed It | +|--------|--------------|---------------------| +| **Movies & Film** | Visual style preferences, narrative structure, mood/atmosphere, genre affinities, anti-preferences, formative films | BOOKS.md (narrative taste), enrichment:favorite_movies, existing P2 responses | +| **Music & Sound** | Functional use (focus/energy/decompress), genre affinities, production preferences, anti-sounds, formative artists | AUDIO.md, enrichment:music_taste, existing P2 responses | +| **Visual Art & Design** | Minimalism vs maximalism spectrum, color palette preferences, design movements, typography, layout sensibility | CREATIVE.md, enrichment:aesthetics, existing P2 responses | +| **Architecture & Spaces** | Material preferences, light quality, scale/intimacy, indoor-outdoor relationship, sacred vs functional | enrichment:aesthetics, existing P2 responses | +| **Food & Culinary** | Flavor profiles, cuisine affinities, cooking philosophy, dining experience priorities, sensory texture preferences | enrichment:daily_routines (meal patterns), existing P2 responses | +| **Fashion & Texture** *(new)* | Material/fabric preferences, silhouette comfort, color wardrobe, formality spectrum, tactile sensitivity | genome:sensory markers (if available), enrichment:aesthetics | +| **Digital & Interface** *(new)* | Dark vs light mode, information density, animation tolerance, typography preferences, notification style, tool aesthetics | PREFERENCES.md, existing PortOS theme choices (port-bg, port-card etc.) 
| + +Each domain captures: +- **Positive affinities** — what they're drawn to and why +- **Anti-preferences** — what they actively avoid (often more revealing than likes) +- **Functional context** — how the preference serves them (focus, comfort, identity, social) +- **Formative influences** — early experiences that shaped the preference +- **Evolution** — how the preference has changed over time + +##### Conversational Prompting Flow + +The key design principle: **conversation, not survey**. The twin generates questions that reference things it already knows, creating a dialogue that feels like it's building on shared context. + +**Flow architecture:** + +``` +┌─────────────────────────────────────────────────┐ +│ 1. Context Aggregation │ +│ Read: BOOKS.md, AUDIO.md, CREATIVE.md, │ +│ PREFERENCES.md, enrichment answers, │ +│ existing taste-profile.json responses, │ +│ personality traits (Big Five Openness) │ +├─────────────────────────────────────────────────┤ +│ 2. Static Core Question (from P2) │ +│ Serve the existing static question first │ +│ to establish baseline in that domain │ +├─────────────────────────────────────────────────┤ +│ 3. Personalized Follow-Up Generation │ +│ LLM generates 1 contextual follow-up using │ +│ identity context + previous answer │ +│ e.g., "You listed Blade Runner — what about │ +│ its visual language specifically grabbed you?" │ +├─────────────────────────────────────────────────┤ +│ 4. Depth Probing (optional, user-initiated) │ +│ "Want to go deeper?" button generates │ +│ another personalized question that connects │ +│ across domains (e.g., music taste ↔ visual) │ +├─────────────────────────────────────────────────┤ +│ 5. Summary & Synthesis │ +│ After core + follow-ups complete, LLM │ +│ generates section summary + cross-domain │ +│ pattern detection │ +└─────────────────────────────────────────────────┘ +``` + +**Prompt template for personalized question generation:** + +``` +You are a thoughtful interviewer building an aesthetic taste profile. +You already know the following about this person: + +## Identity Context +{identityContext — excerpts from BOOKS.md, AUDIO.md, enrichment answers, traits} + +## Previous Responses in This Section +{existingResponses — Q&A pairs from taste-profile.json for this section} + +## Section: {sectionLabel} + +Generate ONE follow-up question that: +1. References something specific from their identity context or previous answers +2. Probes WHY they prefer what they do, not just WHAT +3. Feels conversational — like a friend who knows them asking a natural question +4. Explores an angle their previous answers haven't covered yet +5. Is concise (1-2 sentences max) + +Do NOT: +- Ask generic questions that ignore the context +- Repeat topics already covered in previous responses +- Use survey language ("On a scale of 1-10...") +- Ask multiple questions at once +``` + +**Example personalized exchanges:** + +> **Static (P2):** "Name 3-5 films you consider near-perfect." +> **User:** "Blade Runner, Stalker, Lost in Translation, Drive, Arrival" +> +> **Personalized (P2.5):** "Your BOOKS.md lists several sci-fi titles with themes of isolation and altered perception. Four of your five film picks share that same atmosphere. Is solitude a feature of stories you're drawn to, or is it more about the specific visual treatment of lonely spaces?" + +> **Static (P2):** "What artists or albums have had a lasting impact?" 
+> **User:** "Radiohead, Boards of Canada, Massive Attack" +> +> **Personalized (P2.5):** "All three of those artists layer heavy texture over minimalist structures. Your CREATIVE.md mentions an appreciation for 'controlled complexity.' Does this principle — density within restraint — apply to how you think about visual design too?" + +##### Data Model — Where Taste Lives + +Taste data lives in **two files** with distinct roles: + +**1. Raw questionnaire responses: `data/digital-twin/taste-profile.json`** (existing, extended) + +```json +{ + "version": "2.0.0", + "createdAt": "...", + "updatedAt": "...", + "sections": { + "movies": { + "status": "completed", + "responses": [ + { + "questionId": "movies-core-1", + "answer": "Blade Runner, Stalker, Lost in Translation...", + "answeredAt": "...", + "source": "static" + }, + { + "questionId": "movies-p25-1", + "answer": "It's not solitude per se, it's the visual...", + "answeredAt": "...", + "source": "personalized", + "generatedQuestion": "Your BOOKS.md lists several sci-fi titles...", + "identityContextUsed": ["BOOKS.md:sci-fi-themes", "taste:movies-core-1"] + } + ], + "summary": "..." + }, + "fashion": { "status": "pending", "responses": [], "summary": null }, + "digital": { "status": "pending", "responses": [], "summary": null } + }, + "profileSummary": null, + "lastSessionAt": null +} +``` + +Changes from v1: +- `source` field distinguishes static vs personalized questions +- `generatedQuestion` stores the LLM-generated question text (since personalized questions aren't in the static definition) +- `identityContextUsed` tracks which identity sources informed the question (for provenance) +- Two new sections: `fashion`, `digital` +- Version bumped to 2.0.0 + +**2. Synthesized aesthetic profile: `data/digital-twin/aesthetics.json`** (planned in P1, populated by P2.5) + +```json +{ + "version": "1.0.0", + "updatedAt": "...", + "profile": { + "visualStyle": ["brutalist minimalism", "high-contrast neon", "controlled complexity"], + "narrativePreferences": ["isolation themes", "slow burn", "ambiguity over resolution"], + "musicProfile": ["textural electronica", "atmospheric layering", "functional listening"], + "spatialPreferences": ["raw materials", "dramatic light", "intimacy over grandeur"], + "culinaryIdentity": ["umami-driven", "improvisational cooking", "experience over formality"], + "fashionSensibility": ["monochrome", "natural fibers", "minimal branding"], + "digitalAesthetic": ["dark mode", "high information density", "subtle animation"], + "antiPatterns": ["visual clutter", "forced symmetry", "saccharine sentimentality"], + "corePrinciples": ["density within restraint", "function informing form", "earned complexity"] + }, + "sources": { + "tasteQuestionnaire": { + "sectionsCompleted": 7, + "totalResponses": 28, + "lastUpdated": "..." 
+ }, + "enrichment": { + "aesthetics": { "questionsAnswered": 5 }, + "favoriteBooks": { "analyzed": true, "themes": ["existential sci-fi", "systems thinking"] }, + "favoriteMovies": { "analyzed": true, "themes": ["atmospheric isolation", "neon noir"] }, + "musicTaste": { "analyzed": true, "themes": ["textural electronica", "ambient"] } + }, + "documents": ["BOOKS.md", "AUDIO.md", "CREATIVE.md", "PREFERENCES.md"] + }, + "crossDomainPatterns": [ + "Preference for 'controlled complexity' appears across music (layered textures), visual art (minimalist structure with dense detail), architecture (raw materials with precise placement), and food (complex umami built from simple ingredients)", + "Anti-preference for overt sentimentality spans film (avoids melodrama), music (dislikes saccharine pop), and design (rejects decorative ornamentation)" + ], + "genomicCorrelations": { + "tasteReceptorGenes": "TAS2R38 status may correlate with bitter-food tolerance preferences", + "sensoryProcessing": "Olfactory receptor variants may explain heightened texture sensitivity" + } +} +``` + +This file is the **canonical aesthetic profile** referenced by the Identity orchestrator (`identity.json`). It is regenerated whenever taste-profile.json accumulates significant new responses. + +##### Implementation Steps + +1. **Add 2 new sections** to `TASTE_SECTIONS` in `taste-questionnaire.js`: `fashion` and `digital`, each with 3 core questions and keyword-triggered follow-ups +2. **Add `aggregateIdentityContext(sectionId)`** to `taste-questionnaire.js` — reads BOOKS.md, AUDIO.md, CREATIVE.md, PREFERENCES.md, enrichment answers, and existing taste responses to build a context string for the LLM +3. **Add `generatePersonalizedTasteQuestion(sectionId, existingResponses, identityContext)`** — calls the active AI provider with the prompt template above, returns a single personalized follow-up question +4. **Add `POST /api/digital-twin/taste/:section/personalized-question`** route that returns a generated question +5. **Extend `submitAnswer()`** to accept `source: 'personalized'` and store `generatedQuestion` + `identityContextUsed` metadata +6. **Add "Go deeper" button** to TasteTab.jsx after each static follow-up cycle completes — clicking it calls the personalized question endpoint +7. **Add `generateAestheticsProfile()`** to `taste-questionnaire.js` — synthesizes all taste-profile.json responses + enrichment data into `aesthetics.json` +8. **Bump taste-profile.json version** to 2.0.0, migrate existing responses to include `source: 'static'` +9. **Update TasteTab.jsx** to render personalized questions differently (subtle indicator showing the twin referenced specific context) + +##### Prerequisite Relaxation + +The original spec listed P1 (Identity orchestrator) as a hard prerequisite. This is relaxed: P2.5 can read identity documents directly from the filesystem (`BOOKS.md`, `AUDIO.md`, etc.) and enrichment data from `meta.json` without needing the orchestrator layer. The orchestrator becomes useful for caching and cross-section queries but is not strictly required for context aggregation. 
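
A minimal sketch of what the relaxed prerequisite means in code — `aggregateIdentityContext(sectionId)` reading the identity documents and existing taste responses straight from disk, with no orchestrator in the path. The `data/digital-twin` location, the excerpt cap, and the ESM style are illustrative assumptions; only the document names and the taste-profile.json shape come from the spec above, and enrichment answers from meta.json are omitted for brevity.

```js
// Sketch only — file locations and excerpt sizes are assumptions, not the final API.
import { readFile } from 'fs/promises';
import path from 'path';

const TWIN_DIR = 'data/digital-twin'; // assumed home of the identity documents
const IDENTITY_DOCS = ['BOOKS.md', 'AUDIO.md', 'CREATIVE.md', 'PREFERENCES.md'];

export async function aggregateIdentityContext(sectionId) {
  const parts = [];

  // 1. Raw identity documents, read directly from disk — no orchestrator required
  for (const doc of IDENTITY_DOCS) {
    try {
      const text = await readFile(path.join(TWIN_DIR, doc), 'utf8');
      parts.push(`## ${doc}\n${text.slice(0, 2000)}`); // cap each excerpt to keep the LLM prompt small
    } catch {
      // Missing documents are skipped; context aggregation is best-effort
    }
  }

  // 2. Previous answers for this section from taste-profile.json (v2 shape shown above)
  try {
    const raw = await readFile(path.join(TWIN_DIR, 'taste-profile.json'), 'utf8');
    const responses = JSON.parse(raw).sections?.[sectionId]?.responses ?? [];
    if (responses.length) {
      const qa = responses
        .map(r => `Q: ${r.generatedQuestion ?? r.questionId}\nA: ${r.answer}`)
        .join('\n');
      parts.push(`## Previous Responses (${sectionId})\n${qa}`);
    }
  } catch {
    // No taste profile yet — the static core question still works without it
  }

  return parts.join('\n\n');
}
```

The returned string slots directly into the `{identityContext}` placeholder of the prompt template in the Conversational Prompting Flow above.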
+ #### P5: Cross-Insights Engine - Add `generateCrossInsights(identity)` in identity service - Cross-reference genome markers with chronotype, goals, and enrichment data - Generate natural-language insight strings (e.g., caffeine + chronotype, longevity + goal urgency) - Display on Identity dashboard and inject into CoS context when relevant - Consider autonomous job: periodic identity insight refresh +- Example cross-insights from current marker data: + - CLOCK + CRY1 + PER2 → composite chronotype confidence (3 markers agreeing = high confidence evening/morning) + - MTNR1B concern + evening chronotype → "avoid eating after 8pm — your melatonin receptor variant impairs late glucose handling" + - CYP1A2 slow metabolizer + CLOCK evening → "caffeine cutoff by noon, not 2pm" + - FOXO3A/CETP/IGF1R longevity markers + cardiovascular risk → adjusted life expectancy for goal urgency + +### Identity Extension Roadmap + +This roadmap connects brain ideas and the Genome Section Integration project (0e6a0332) into a unified implementation sequence. + +#### Source Ideas +- **Brain idea 608dc733**: "Prompting Aesthetic Taste Docs via Digital Twin" — use the twin's existing knowledge to generate personalized aesthetic preference questions +- **Brain idea 284dd487**: "Genome Types & Chronotype Trait" — derive chronotype from 5 sleep/circadian markers + behavioral data +- **Project 0e6a0332**: "Genome Section Integration" — unify genome data with Identity page architecture + +#### Phase Dependency Graph + +``` +P1: Identity Orchestrator & Chronotype ──── (brain idea 284dd487) + │ Creates identity.json, chronotype.json, + │ identity service, derivation from 5 sleep markers + │ + ├─► P2.5: Personalized Taste Prompting ─── (brain idea 608dc733) + │ Uses identity context to generate smart taste questions + │ Enhances existing TasteTab with twin-aware follow-ups + │ + ├─► P3: Mortality-Aware Goal Tracking + │ Birth date + genome longevity/cardio markers → life expectancy + │ Urgency scoring for prioritized goal management + │ + └─► P4: Identity Tab UI + Dashboard with summary cards for all 4 sections + Sub-routes for chronotype, taste, goals deep dives + │ + └─► P5: Cross-Insights Engine + Reads all sections, generates natural-language insights + Injects identity context into CoS agent briefings +``` + +#### Implementation Priority +1. **P1** — Foundation: nothing else works without the orchestrator +2. **P2.5** — Quick win: enhances existing Taste tab with minimal new infrastructure +3. **P3** — New feature: mortality-aware goals need genome data flowing through identity service +4. **P4** — UI: renders what P1-P3 produce +5. 
**P5** — Polish: cross-entity reasoning requires all sections populated ### Data Flow ``` -User uploads 23andMe → genome.json (existing) +User uploads 23andMe → genome.json (117 markers, 32 categories) + ↓ +Identity service reads 5 sleep markers + 2 caffeine markers ↓ -Identity service reads genome sleep markers + caffeine markers +Derives chronotype.json (+ behavioral input from daily_routines enrichment) ↓ -Derives chronotype.json (with behavioral input from enrichment) +Twin reads identity context → generates personalized taste questions (P2.5) ↓ -User sets birth date → goals.json (life expectancy from actuarial + genome) +User completes taste questionnaire → taste-profile.json → aesthetics.json ↓ -User completes taste questionnaire → aesthetics.json +LLM analyzes books/movies/music docs → seeds aesthetic profile themes ↓ -LLM analyzes books/movies/music → seeds aesthetic profile themes +User sets birth date → goals.json (life expectancy from actuarial + 10 genome markers) ↓ Cross-insights engine reads all 4 sections → generates natural-language insights ↓ -Identity tab renders unified dashboard +Identity tab renders unified dashboard with summary cards + insights ↓ CoS injects identity context into agent briefings when relevant ``` @@ -411,15 +675,19 @@ CoS injects identity context into agent briefings when relevant - `client/src/pages/DigitalTwin.jsx` — add Identity tab rendering - `client/src/services/api.js` — add identity API methods - `server/index.js` — mount identity routes +- `server/services/taste-questionnaire.js` — add `generatePersonalizedTasteQuestion()` using identity context (P2.5) +- `client/src/components/digital-twin/tabs/TasteTab.jsx` — wire personalized question generation (P2.5) ### Design Decisions -1. **Separate data files per section** (not one giant file) — each section has independent update cadence and the genome file is already large +1. **Separate data files per section** (not one giant file) — each section has independent update cadence and the genome file (82KB) is already large 2. **Derivation over duplication** — chronotype reads from genome.json at query time rather than copying marker data. Identity service is the join layer 3. **Progressive disclosure** — Identity tab shows summary cards; deep dives are sub-routes, not modals (per CLAUDE.md: all views must be deep-linkable) 4. **LLM-assisted but user-confirmed** — aesthetic themes extracted by LLM from media lists are suggestions, not gospel. User confirms/edits 5. **No new dependencies** — uses existing Zod, Express, React, Lucide stack 6. **Genome data stays read-only** — identity service reads genome markers but never writes to genome.json +7. **Taste data consolidation** — P2 created `taste-profile.json` (5 sections). P2.5 adds twin-aware personalized questions. Long-term, taste data migrates into `aesthetics.json` as the canonical aesthetic profile, with taste-profile.json as the raw questionnaire responses +8. **Weighted chronotype confidence** — 5 sleep markers weighted by specificity: CRY1 (strongest DSPD signal) > CLOCK (evening tendency) > PER2 (circadian period) > MTNR1B (melatonin coupling) > DEC2 (duration, not phase). 

---
diff --git a/client/package.json b/client/package.json
index e2dffc09..0ab2a171 100644
--- a/client/package.json
+++ b/client/package.json
@@ -1,6 +1,6 @@
 {
   "name": "portos-client",
-  "version": "0.12.48",
+  "version": "0.13.20",
   "private": true,
   "type": "module",
   "scripts": {
diff --git a/client/src/App.jsx b/client/src/App.jsx
index 8477c53f..70298c29 100644
--- a/client/src/App.jsx
+++ b/client/src/App.jsx
@@ -1,6 +1,7 @@
 import { Suspense, lazy } from 'react';
 import { Routes, Route, Navigate } from 'react-router-dom';
 import Layout from './components/Layout';
+import BrailleSpinner from './components/BrailleSpinner';
 import Dashboard from './pages/Dashboard';
 import Apps from './pages/Apps';
 import CreateApp from './pages/CreateApp';
@@ -30,7 +31,7 @@ const CyberCity = lazy(() => import('./pages/CyberCity'));
 
 // Loading fallback for lazy-loaded pages
 const PageLoader = () => (
-
Loading...
+
); diff --git a/client/src/components/BrailleSpinner.jsx b/client/src/components/BrailleSpinner.jsx new file mode 100644 index 00000000..5a1e9cc6 --- /dev/null +++ b/client/src/components/BrailleSpinner.jsx @@ -0,0 +1,19 @@ +import { useState, useEffect } from 'react'; + +const FRAMES = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']; +const INTERVAL_MS = 80; + +export default function BrailleSpinner({ text, className = '' }) { + const [frame, setFrame] = useState(0); + + useEffect(() => { + const id = setInterval(() => setFrame(f => (f + 1) % FRAMES.length), INTERVAL_MS); + return () => clearInterval(id); + }, []); + + return ( + + {FRAMES[frame]}{text ? ` ${text}` : ''} + + ); +} diff --git a/client/src/components/CosDashboardWidget.jsx b/client/src/components/CosDashboardWidget.jsx index 17f51b65..7a0295d0 100644 --- a/client/src/components/CosDashboardWidget.jsx +++ b/client/src/components/CosDashboardWidget.jsx @@ -11,7 +11,8 @@ import { Zap, Bot, XCircle, - History + History, + Activity } from 'lucide-react'; import * as api from '../services/api'; @@ -23,19 +24,23 @@ const CosDashboardWidget = memo(function CosDashboardWidget() { const [summary, setSummary] = useState(null); const [learningSummary, setLearningSummary] = useState(null); const [recentTasks, setRecentTasks] = useState(null); + const [activityCalendar, setActivityCalendar] = useState(null); const [loading, setLoading] = useState(true); const [tasksExpanded, setTasksExpanded] = useState(false); useEffect(() => { const loadData = async () => { - const [quickData, learningData, tasksData] = await Promise.all([ - api.getCosQuickSummary().catch(() => null), - api.getCosLearningSummary().catch(() => null), - api.getCosRecentTasks(5).catch(() => null) + const silent = { silent: true }; + const [quickData, learningData, tasksData, calendarData] = await Promise.all([ + api.getCosQuickSummary(silent).catch(() => null), + api.getCosLearningSummary(silent).catch(() => null), + api.getCosRecentTasks(5, silent).catch(() => null), + api.getCosActivityCalendar(8, silent).catch(() => null) ]); setSummary(quickData); setLearningSummary(learningData); setRecentTasks(tasksData); + setActivityCalendar(calendarData); setLoading(false); }; @@ -184,6 +189,11 @@ const CosDashboardWidget = memo(function CosDashboardWidget() { + {/* Activity Calendar - GitHub-style heatmap */} + {activityCalendar?.weeks?.length > 0 && activityCalendar.summary.totalTasks > 0 && ( + + )} + {/* Recent Tasks Section */} {recentTasks?.tasks?.length > 0 && (
@@ -257,4 +267,112 @@ const CosDashboardWidget = memo(function CosDashboardWidget() { ); }); +/** + * ActivityCalendar - Compact GitHub-style activity heatmap + * Shows daily task completion as colored squares + */ +function ActivityCalendar({ data }) { + // Calculate intensity level (0-4) based on tasks completed + const getIntensityLevel = (tasks) => { + if (tasks === 0) return 0; + if (tasks === 1) return 1; + const max = data.maxTasks || 1; + const ratio = tasks / max; + if (ratio >= 0.75) return 4; + if (ratio >= 0.5) return 3; + if (ratio >= 0.25) return 2; + return 1; + }; + + // Get color class based on intensity and success rate + const getColorClass = (day) => { + if (day.tasks === 0 || day.isFuture) return 'bg-port-border/20'; + const intensity = getIntensityLevel(day.tasks); + + // Color based on success rate + if (day.successRate >= 80) { + const shades = ['', 'bg-emerald-900/50', 'bg-emerald-700/60', 'bg-emerald-500/70', 'bg-emerald-400']; + return shades[intensity]; + } else if (day.successRate >= 50) { + const shades = ['', 'bg-amber-900/50', 'bg-amber-700/60', 'bg-amber-500/70', 'bg-amber-400']; + return shades[intensity]; + } else { + const shades = ['', 'bg-red-900/50', 'bg-red-700/60', 'bg-red-500/70', 'bg-red-400']; + return shades[intensity]; + } + }; + + const formatDate = (dateStr) => { + const date = new Date(dateStr + 'T12:00:00'); + return date.toLocaleDateString('en-US', { weekday: 'short', month: 'short', day: 'numeric' }); + }; + + return ( +
+
+
+ + Activity + {data.currentStreak > 0 && ( + + + {data.currentStreak}d + + )} +
+ + {data.summary.activeDays} active days + + +
+ + {/* Calendar Grid */} +
+
+ {data.weeks.map((week, weekIdx) => ( +
+ {week.map((day) => ( +
+ ))} +
+ ))} +
+
+ + {/* Summary Row */} +
+ + {data.summary.totalTasks} tasks,{' '} + = 80 ? 'text-port-success' : + data.summary.successRate >= 50 ? 'text-port-warning' : 'text-port-error' + }`}>{data.summary.successRate}% success + + {/* Mini Legend */} +
+ Less +
+
+
+
+ More +
+
+
+ ); +} + export default CosDashboardWidget; diff --git a/client/src/components/GoalProgressWidget.jsx b/client/src/components/GoalProgressWidget.jsx new file mode 100644 index 00000000..abfd93ed --- /dev/null +++ b/client/src/components/GoalProgressWidget.jsx @@ -0,0 +1,156 @@ +import { useState, useEffect, memo } from 'react'; +import { Link } from 'react-router-dom'; +import { + Target, + ChevronRight, + TrendingUp, + AlertTriangle +} from 'lucide-react'; +import * as api from '../services/api'; + +/** + * GoalProgressWidget - Shows progress toward user goals on the dashboard + * Maps completed CoS tasks to goal categories from COS-GOALS.md + */ +const GoalProgressWidget = memo(function GoalProgressWidget() { + const [progress, setProgress] = useState(null); + const [loading, setLoading] = useState(true); + + useEffect(() => { + const loadData = async () => { + const data = await api.getCosGoalProgressSummary({ silent: true }).catch(() => null); + setProgress(data); + setLoading(false); + }; + + loadData(); + // Refresh every 60 seconds + const interval = setInterval(loadData, 60000); + return () => clearInterval(interval); + }, []); + + // Don't render while loading or if no goals + if (loading || !progress?.goals?.length) { + return null; + } + + const { goals, summary } = progress; + + // Color mappings for engagement levels + const getColorClasses = (color, engagement) => { + const intensityMap = { + high: { emerald: 'bg-emerald-500', purple: 'bg-purple-500', blue: 'bg-blue-500', pink: 'bg-pink-500', green: 'bg-green-500', gray: 'bg-gray-500' }, + medium: { emerald: 'bg-emerald-500/60', purple: 'bg-purple-500/60', blue: 'bg-blue-500/60', pink: 'bg-pink-500/60', green: 'bg-green-500/60', gray: 'bg-gray-500/60' }, + low: { emerald: 'bg-emerald-500/30', purple: 'bg-purple-500/30', blue: 'bg-blue-500/30', pink: 'bg-pink-500/30', green: 'bg-green-500/30', gray: 'bg-gray-500/30' } + }; + return intensityMap[engagement]?.[color] || 'bg-gray-500/30'; + }; + + const getTextColorClass = (color) => { + const colorMap = { + emerald: 'text-emerald-400', + purple: 'text-purple-400', + blue: 'text-blue-400', + pink: 'text-pink-400', + green: 'text-green-400', + gray: 'text-gray-400' + }; + return colorMap[color] || 'text-gray-400'; + }; + + return ( +
+ {/* Header */} +
+
+ +
+

Goal Progress

+

+ {summary.totalTasks} tasks toward {summary.totalGoals} goals +

+
+
+ + View Tasks + + +
+ + {/* Goal Progress Bars */} +
+ {goals.map((goal) => ( +
+
+
+ + {goal.name} +
+
+ {goal.successRate !== null && ( + = 80 ? 'text-port-success' : goal.successRate >= 50 ? 'text-port-warning' : 'text-port-error'}`}> + {goal.successRate}% + + )} + + {goal.tasks} task{goal.tasks !== 1 ? 's' : ''} + +
+
+ {/* Progress bar */} +
+
g.tasks), 1)) * 100))}%` + }} + /> +
+
+ ))} +
+ + {/* Insights Row */} + {(summary.mostActive || summary.leastActive) && ( +
+
+ {summary.mostActive && ( +
+ + Most active: {summary.mostActive} +
+ )} + {summary.leastActive && ( +
+ + Needs attention: {summary.leastActive} +
+ )} +
+
+ )} + + {/* Overall Success Rate */} + {summary.overallSuccessRate !== null && ( +
+ + Overall success rate:{' '} + = 80 ? 'text-port-success' : + summary.overallSuccessRate >= 50 ? 'text-port-warning' : 'text-port-error' + }`}> + {summary.overallSuccessRate}% + + +
+ )} +
+ ); +}); + +export default GoalProgressWidget; diff --git a/client/src/components/Layout.jsx b/client/src/components/Layout.jsx index 69ea2841..aeca793c 100644 --- a/client/src/components/Layout.jsx +++ b/client/src/components/Layout.jsx @@ -39,6 +39,7 @@ import packageJson from '../../package.json'; import Logo from './Logo'; import { useErrorNotifications } from '../hooks/useErrorNotifications'; import { useNotifications } from '../hooks/useNotifications'; +import { useAgentFeedbackToast } from '../hooks/useAgentFeedbackToast'; import NotificationDropdown from './NotificationDropdown'; const navItems = [ @@ -114,6 +115,9 @@ export default function Layout() { // Subscribe to server error notifications useErrorNotifications(); + // Subscribe to agent completion feedback toasts + useAgentFeedbackToast(); + // Notifications for user task alerts const { notifications, @@ -328,7 +332,7 @@ export default function Layout() { }; return ( -
+
{/* Skip to main content link for keyboard users */} {/* Main area */} -
+
{/* Mobile header */}
; + return
; } return ( diff --git a/client/src/components/agents/constants.js b/client/src/components/agents/constants.js index 7c0bd737..2661d058 100644 --- a/client/src/components/agents/constants.js +++ b/client/src/components/agents/constants.js @@ -4,7 +4,8 @@ export const AGENT_DETAIL_TABS = [ { id: 'overview', label: 'Overview', icon: '📋' }, - { id: 'tools', label: 'Tools', icon: '🛠️' }, + { id: 'tools', label: 'Moltbook', icon: '📚' }, + { id: 'world', label: 'World', icon: '🌍' }, { id: 'published', label: 'Published', icon: '📰' }, { id: 'schedules', label: 'Schedules', icon: '📅' }, { id: 'activity', label: 'Activity', icon: '📊' } @@ -19,12 +20,20 @@ export const PERSONALITY_STYLES = [ ]; export const ACTION_TYPES = [ - { value: 'post', label: 'Post', description: 'Create new posts', icon: '📝' }, - { value: 'comment', label: 'Comment', description: 'Reply to posts', icon: '💬' }, - { value: 'vote', label: 'Vote', description: 'Upvote or downvote content', icon: '👍' }, - { value: 'heartbeat', label: 'Heartbeat', description: 'Browse and engage naturally', icon: '💓' }, - { value: 'engage', label: 'Engage', description: 'AI-powered browsing, commenting, and voting', icon: '🤝' }, - { value: 'monitor', label: 'Monitor', description: 'Check post engagement and respond', icon: '👀' } + // Moltbook actions + { value: 'post', label: 'Post', description: 'Create new posts', icon: '📝', platform: 'moltbook' }, + { value: 'comment', label: 'Comment', description: 'Reply to posts', icon: '💬', platform: 'moltbook' }, + { value: 'vote', label: 'Vote', description: 'Upvote or downvote content', icon: '👍', platform: 'moltbook' }, + { value: 'heartbeat', label: 'Heartbeat', description: 'Browse and engage naturally', icon: '💓', platform: 'moltbook' }, + { value: 'engage', label: 'Engage', description: 'AI-powered browsing, commenting, and voting', icon: '🤝', platform: 'moltbook' }, + { value: 'monitor', label: 'Monitor', description: 'Check post engagement and respond', icon: '👀', platform: 'moltbook' }, + // Moltworld actions + { value: 'mw_heartbeat', label: 'Heartbeat', description: 'Stay visible in the world', icon: '💓', platform: 'moltworld' }, + { value: 'mw_explore', label: 'Explore', description: 'Move to coordinates and observe', icon: '🌍', platform: 'moltworld' }, + { value: 'mw_build', label: 'Build', description: 'Place or remove blocks', icon: '🧱', platform: 'moltworld' }, + { value: 'mw_say', label: 'Say', description: 'Send messages to nearby agents', icon: '💬', platform: 'moltworld' }, + { value: 'mw_think', label: 'Think', description: 'Send a visible thought', icon: '💭', platform: 'moltworld' }, + { value: 'mw_interact', label: 'Interact', description: 'Move, think, and optionally build', icon: '🤝', platform: 'moltworld' } ]; export const SCHEDULE_TYPES = [ @@ -34,7 +43,8 @@ export const SCHEDULE_TYPES = [ ]; export const PLATFORM_TYPES = [ - { value: 'moltbook', label: 'Moltbook', description: 'AI social platform', icon: '📚' } + { value: 'moltbook', label: 'Moltbook', description: 'AI social platform', icon: '📚' }, + { value: 'moltworld', label: 'Moltworld', description: 'Shared voxel world', icon: '🌍' } ]; export const ACCOUNT_STATUSES = { diff --git a/client/src/components/agents/tabs/ActivityTab.jsx b/client/src/components/agents/tabs/ActivityTab.jsx index 9e754786..423cfd6e 100644 --- a/client/src/components/agents/tabs/ActivityTab.jsx +++ b/client/src/components/agents/tabs/ActivityTab.jsx @@ -1,5 +1,6 @@ import { useState, useEffect, useCallback } from 'react'; import * as api from 
'../../../services/api'; +import BrailleSpinner from '../../BrailleSpinner'; import { ACTION_TYPES } from '../constants'; export default function ActivityTab({ agentId }) { @@ -55,7 +56,7 @@ export default function ActivityTab({ agentId }) { }; if (loading) { - return
Loading activity...
; + return
; } return ( diff --git a/client/src/components/agents/tabs/OverviewTab.jsx b/client/src/components/agents/tabs/OverviewTab.jsx index e0454715..291f482c 100644 --- a/client/src/components/agents/tabs/OverviewTab.jsx +++ b/client/src/components/agents/tabs/OverviewTab.jsx @@ -3,6 +3,7 @@ import { useNavigate } from 'react-router-dom'; import { Cpu, Zap, MessageSquare, Eye } from 'lucide-react'; import toast from 'react-hot-toast'; import * as api from '../../../services/api'; +import BrailleSpinner from '../../BrailleSpinner'; import { PERSONALITY_STYLES, DEFAULT_PERSONALITY, DEFAULT_AVATAR, PLATFORM_TYPES, ACCOUNT_STATUSES } from '../constants'; export default function OverviewTab({ agentId, agent, onAgentUpdate }) { @@ -35,6 +36,7 @@ export default function OverviewTab({ agentId, agent, onAgentUpdate }) { const [quickAccountId, setQuickAccountId] = useState(''); const [engaging, setEngaging] = useState(false); const [checking, setChecking] = useState(false); + const [exploring, setExploring] = useState(false); const [rateLimits, setRateLimits] = useState(null); // Cooldown timer state @@ -93,14 +95,22 @@ export default function OverviewTab({ agentId, agent, onAgentUpdate }) { } }, [accounts, quickAccountId]); + // Derive platform from selected quick account + const quickAccount = accounts.find(a => a.id === quickAccountId); + const quickPlatform = quickAccount?.platform || 'moltbook'; + // Load rate limits when quick account changes useEffect(() => { if (!quickAccountId) { setRateLimits(null); return; } - api.getAgentRateLimits(quickAccountId).then(setRateLimits).catch(() => {}); - }, [quickAccountId]); + const account = accounts.find(a => a.id === quickAccountId); + const endpoint = account?.platform === 'moltworld' + ? api.moltworldRateLimits(quickAccountId) + : api.getAgentRateLimits(quickAccountId); + endpoint.then(setRateLimits).catch(() => {}); + }, [quickAccountId, accounts]); // Calculate cooldown end timestamps from rate limit data useEffect(() => { @@ -197,6 +207,24 @@ export default function OverviewTab({ agentId, agent, onAgentUpdate }) { api.getAgentRateLimits(quickAccountId).then(setRateLimits).catch(() => {}); }; + // Moltworld quick actions + const handleExplore = async () => { + if (!agentId || !quickAccountId) return; + setExploring(true); + const result = await api.moltworldExplore(quickAccountId, agentId).catch(() => null); + setExploring(false); + if (!result) return; + toast.success(`Explored (${result.x}, ${result.y}) — ${result.nearby || 0} agents nearby`); + api.moltworldRateLimits(quickAccountId).then(setRateLimits).catch(() => {}); + }; + + const handleBuild = async () => { + if (!agentId || !quickAccountId) return; + const result = await api.moltworldBuild(quickAccountId, agentId, 0, 0, 0, 'stone', 'place').catch(() => null); + if (result) toast.success('Block placed'); + api.moltworldRateLimits(quickAccountId).then(setRateLimits).catch(() => {}); + }; + // Account handlers const resetAccountForm = () => { setAccountForm({ platform: 'moltbook', name: '', description: '' }); @@ -253,7 +281,7 @@ export default function OverviewTab({ agentId, agent, onAgentUpdate }) { }; if (!formData) { - return
Loading...
; + return
; } const activeAccounts = accounts.filter(a => a.status === 'active'); @@ -451,29 +479,52 @@ export default function OverviewTab({ agentId, agent, onAgentUpdate }) { )}
- - - + {quickPlatform === 'moltworld' ? ( + <> + + + + ) : ( + <> + + + + + )}
)} diff --git a/client/src/components/agents/tabs/PublishedTab.jsx b/client/src/components/agents/tabs/PublishedTab.jsx index cbb9fa96..b94602c8 100644 --- a/client/src/components/agents/tabs/PublishedTab.jsx +++ b/client/src/components/agents/tabs/PublishedTab.jsx @@ -1,5 +1,6 @@ import { useState, useEffect, useCallback } from 'react'; import * as api from '../../../services/api'; +import BrailleSpinner from '../../BrailleSpinner'; const formatRelativeTime = (dateStr) => { const diff = Date.now() - new Date(dateStr).getTime(); @@ -51,7 +52,7 @@ export default function PublishedTab({ agentId }) { }, [loadPublished]); if (loading) { - return
Loading...
; + return
; } return ( diff --git a/client/src/components/agents/tabs/SchedulesTab.jsx b/client/src/components/agents/tabs/SchedulesTab.jsx index f91e6627..dcc1651f 100644 --- a/client/src/components/agents/tabs/SchedulesTab.jsx +++ b/client/src/components/agents/tabs/SchedulesTab.jsx @@ -1,5 +1,6 @@ import { useState, useEffect, useCallback } from 'react'; import * as api from '../../../services/api'; +import BrailleSpinner from '../../BrailleSpinner'; import { ACTION_TYPES, SCHEDULE_TYPES, CRON_PRESETS, INTERVAL_PRESETS } from '../constants'; export default function SchedulesTab({ agentId }) { @@ -89,6 +90,13 @@ export default function SchedulesTab({ agentId }) { const filteredAccounts = accounts.filter(a => a.status === 'active'); + // Filter action types by the selected account's platform + const selectedAccount = accounts.find(a => a.id === formData.accountId); + const selectedPlatform = selectedAccount?.platform; + const filteredActionTypes = selectedPlatform + ? ACTION_TYPES.filter(a => a.platform === selectedPlatform) + : ACTION_TYPES.filter(a => a.platform === 'moltbook'); + const formatSchedule = (schedule) => { if (schedule.type === 'cron') { const preset = CRON_PRESETS.find(p => p.value === schedule.cron); @@ -107,7 +115,7 @@ export default function SchedulesTab({ agentId }) { }; if (loading) { - return
Loading schedules...
; + return
; } return ( @@ -145,14 +153,19 @@ export default function SchedulesTab({ agentId }) { @@ -164,7 +177,7 @@ export default function SchedulesTab({ agentId }) { onChange={(e) => setFormData({ ...formData, action: { type: e.target.value, params: {} } })} className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" > - {ACTION_TYPES.map(action => ( + {filteredActionTypes.map(action => ( diff --git a/client/src/components/agents/tabs/ToolsTab.jsx b/client/src/components/agents/tabs/ToolsTab.jsx index a57541fa..3f369288 100644 --- a/client/src/components/agents/tabs/ToolsTab.jsx +++ b/client/src/components/agents/tabs/ToolsTab.jsx @@ -1,10 +1,11 @@ import { useState, useEffect, useCallback } from 'react'; import toast from 'react-hot-toast'; import * as api from '../../../services/api'; +import BrailleSpinner from '../../BrailleSpinner'; export default function ToolsTab({ agentId, agent }) { - const [accounts, setAccounts] = useState([]); const [selectedAccountId, setSelectedAccountId] = useState(''); + const [accountName, setAccountName] = useState(''); const [rateLimits, setRateLimits] = useState(null); const [loading, setLoading] = useState(true); @@ -45,21 +46,19 @@ export default function ToolsTab({ agentId, agent }) { const [cooldownEnds, setCooldownEnds] = useState({}); const [, setTick] = useState(0); - const fetchInitial = useCallback(async () => { - const accountsData = await api.getPlatformAccounts(agentId); - const active = accountsData.filter(a => a.status === 'active'); - setAccounts(active); - if (active.length === 1 && !selectedAccountId) { - setSelectedAccountId(active[0].id); - } - setLoading(false); - }, [agentId]); - + // Auto-resolve the moltbook account for this agent useEffect(() => { - fetchInitial(); - }, [fetchInitial]); + api.getPlatformAccounts(agentId, 'moltbook').then(data => { + const active = data.filter(a => a.status === 'active'); + if (active.length > 0) { + setSelectedAccountId(active[0].id); + setAccountName(active[0].credentials?.username || ''); + } + setLoading(false); + }).catch(() => setLoading(false)); + }, [agentId]); - // Load rate limits + submolts when account changes + // Load rate limits + submolts when account resolves useEffect(() => { if (!selectedAccountId) { setRateLimits(null); @@ -300,15 +299,14 @@ export default function ToolsTab({ agentId, agent }) { setPostTitle(draft.title || ''); setPostContent(draft.content || ''); if (draft.submolt) setSelectedSubmolt(draft.submolt); - if (draft.accountId) setSelectedAccountId(draft.accountId); setSelectedPost(null); setCommentContent(''); } else { setCommentContent(draft.content || ''); setReplyToId(draft.parentId || null); - if (draft.postId && draft.accountId) { - setSelectedAccountId(draft.accountId); - api.getAgentPost(draft.accountId, draft.postId).then(details => { + if (draft.postId) { + const acctId = draft.accountId || selectedAccountId; + api.getAgentPost(acctId, draft.postId).then(details => { setSelectedPost(details); setPostComments(details.comments || []); }).catch(() => {}); @@ -337,30 +335,23 @@ export default function ToolsTab({ agentId, agent }) { }; if (loading) { - return
Loading tools...
; + return
; } - const ready = !!selectedAccountId; + if (!selectedAccountId) { + return ( +
+

No active Moltbook account

+

Register a Moltbook account on the Overview tab to get started

+
+ ); + } return (
- {/* Header: Account Selection + Rate Limits */} -
-
- - -
+ {/* Header: Account Name + Rate Limits */} +
+ Account: {accountName} {rateLimits && (
{Object.entries(rateLimits).map(([action, rl]) => { @@ -384,14 +375,7 @@ export default function ToolsTab({ agentId, agent }) { )}
- {!ready && ( -
-

Select an account to get started

-

Use the dropdown above to choose an active Moltbook account

-
- )} - - {ready && (<> + {(<>
{/* Left Column: Feed + Engage */}
diff --git a/client/src/components/agents/tabs/WorldTab.jsx b/client/src/components/agents/tabs/WorldTab.jsx new file mode 100644 index 00000000..cce85b30 --- /dev/null +++ b/client/src/components/agents/tabs/WorldTab.jsx @@ -0,0 +1,1010 @@ +import { useState, useEffect, useCallback } from 'react'; +import toast from 'react-hot-toast'; +import * as api from '../../../services/api'; +import BrailleSpinner from '../../BrailleSpinner'; +import socket from '../../../services/socket'; +import useMoltworldWs from '../../../hooks/useMoltworldWs'; + +const EVENT_ICONS = { + status: '🔌', + presence: '👥', + thinking: '💭', + thought: '💭', + action: '🎬', + move: '🚶', + build: '🧱', + interaction: '💬', + message: '💬', + say: '💬', + nearby: '📡', + hello_ack: '👋', + welcome: '👋', + event: '📨' +}; + +const HISTORY_ACTION_ICONS = { + mw_explore: '🌍', + mw_build: '🧱', + mw_think: '💭', + mw_say: '💬', + mw_heartbeat: '💓', + mw_interact: '🤝' +}; + +const HISTORY_FILTERS = [ + { value: '', label: 'All' }, + { value: 'mw_explore', label: 'Explore' }, + { value: 'mw_think', label: 'Think' }, + { value: 'mw_say', label: 'Say' }, + { value: 'mw_build', label: 'Build' }, + { value: 'mw_heartbeat', label: 'Heartbeat' } +]; + +const QUEUE_ACTION_TYPES = [ + { value: 'mw_explore', label: 'Explore' }, + { value: 'mw_think', label: 'Think' }, + { value: 'mw_build', label: 'Build' }, + { value: 'mw_say', label: 'Say' } +]; + +const QUEUE_STATUS_STYLES = { + pending: 'bg-gray-600/20 text-gray-400', + executing: 'bg-port-accent/20 text-port-accent animate-pulse', + completed: 'bg-port-success/20 text-port-success', + failed: 'bg-port-error/20 text-port-error' +}; + +function formatEventTime(ts) { + const d = new Date(ts); + return d.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit', second: '2-digit' }); +} + +function formatRelativeTime(ts) { + const diff = Date.now() - new Date(ts).getTime(); + if (diff < 60000) return 'just now'; + if (diff < 3600000) return `${Math.floor(diff / 60000)}m ago`; + if (diff < 86400000) return `${Math.floor(diff / 3600000)}h ago`; + return `${Math.floor(diff / 86400000)}d ago`; +} + +function summarizeParams(action, params) { + if (!params) return ''; + if (action === 'mw_explore' || action === 'mw_heartbeat') { + const parts = []; + if (params.x != null && params.y != null) parts.push(`(${params.x}, ${params.y})`); + if (params.thinking) parts.push(`"${params.thinking.substring(0, 40)}..."`); + return parts.join(' '); + } + if (action === 'mw_think') return params.thought ? `"${params.thought.substring(0, 50)}"` : ''; + if (action === 'mw_say') return params.message ? 
`"${params.message.substring(0, 50)}"` : ''; + if (action === 'mw_build') return `(${params.x},${params.y},${params.z}) ${params.type || 'stone'} ${params.action || 'place'}`; + return ''; +} + +export default function WorldTab({ agentId }) { + const [accountId, setAccountId] = useState(null); + const [accountName, setAccountName] = useState(''); + const [rateLimits, setRateLimits] = useState(null); + const [loading, setLoading] = useState(true); + + // Status state + const [status, setStatus] = useState(null); + const [statusLoading, setStatusLoading] = useState(false); + + // Nearby agents + messages from last join/explore + const [nearby, setNearby] = useState([]); + const [messages, setMessages] = useState([]); + + // Move/Explore state + const [moveX, setMoveX] = useState(''); + const [moveY, setMoveY] = useState(''); + const [moveThinking, setMoveThinking] = useState(''); + const [moving, setMoving] = useState(false); + + // Think state + const [thought, setThought] = useState(''); + const [thinking, setThinking] = useState(false); + + // Build state + const [buildX, setBuildX] = useState(''); + const [buildY, setBuildY] = useState(''); + const [buildZ, setBuildZ] = useState('0'); + const [blockType, setBlockType] = useState('stone'); + const [buildAction, setBuildAction] = useState('place'); + const [building, setBuilding] = useState(false); + + // Say state + const [sayMessage, setSayMessage] = useState(''); + const [sayTo, setSayTo] = useState(''); + const [saying, setSaying] = useState(false); + + // Cooldown timer state + const [cooldownEnds, setCooldownEnds] = useState({}); + const [, setTick] = useState(0); + + // Activity History state + const [history, setHistory] = useState([]); + const [historyLoading, setHistoryLoading] = useState(false); + const [historyHasMore, setHistoryHasMore] = useState(false); + const [historyFilter, setHistoryFilter] = useState(''); + + // Action Queue state + const [queue, setQueue] = useState([]); + const [queueLoading, setQueueLoading] = useState(false); + const [showAddForm, setShowAddForm] = useState(false); + const [newActionType, setNewActionType] = useState('mw_explore'); + const [newActionParams, setNewActionParams] = useState({}); + + // WebSocket hook + const { + connectionStatus, + feedItems, + presence, + connect: wsConnect, + disconnect: wsDisconnect + } = useMoltworldWs(); + + const wsConnected = connectionStatus === 'connected'; + + // Auto-resolve the moltworld account for this agent + useEffect(() => { + api.getPlatformAccounts(agentId, 'moltworld').then(data => { + const active = data.filter(a => a.status === 'active'); + if (active.length > 0) { + setAccountId(active[0].id); + setAccountName(active[0].credentials?.username || ''); + } + setLoading(false); + }).catch(() => setLoading(false)); + }, [agentId]); + + const fetchStatus = useCallback(async () => { + if (!accountId) return; + setStatusLoading(true); + const data = await api.moltworldStatus(accountId).catch(() => null); + if (data) setStatus(data); + setStatusLoading(false); + }, [accountId]); + + const fetchRateLimits = useCallback(async () => { + if (!accountId) return; + const data = await api.moltworldRateLimits(accountId).catch(() => null); + if (data) setRateLimits(data); + }, [accountId]); + + // Fetch activity history + const fetchHistory = useCallback(async (append = false, before = null) => { + setHistoryLoading(true); + const mwActions = historyFilter || 'mw_explore,mw_build,mw_think,mw_say,mw_heartbeat,mw_interact'; + let data; + if (before) { + data = await 
api.getAgentActivityTimeline(30, [agentId], before).catch(() => null); + } else { + data = await api.getAgentActivities(30, [agentId], mwActions).catch(() => null); + } + if (data) { + const items = Array.isArray(data) ? data : data.activities || []; + // Filter to mw_* actions only + const mwItems = items.filter(a => a.action?.startsWith('mw_')); + if (append) { + setHistory(prev => [...prev, ...mwItems]); + } else { + setHistory(mwItems); + } + setHistoryHasMore(mwItems.length >= 30); + } + setHistoryLoading(false); + }, [agentId, historyFilter]); + + // Fetch action queue + const fetchQueue = useCallback(async () => { + setQueueLoading(true); + const data = await api.moltworldGetQueue(agentId).catch(() => null); + if (data) setQueue(data); + setQueueLoading(false); + }, [agentId]); + + // Fetch status + rate limits + history + queue when account resolves + useEffect(() => { + if (!accountId) return; + fetchStatus(); + fetchRateLimits(); + fetchHistory(); + fetchQueue(); + }, [accountId, fetchStatus, fetchRateLimits, fetchHistory, fetchQueue]); + + // Re-fetch history when filter changes + useEffect(() => { + if (accountId) fetchHistory(); + }, [historyFilter]); // eslint-disable-line react-hooks/exhaustive-deps + + // Listen for queue Socket.IO events + useEffect(() => { + const handleQueueChange = () => fetchQueue(); + socket.on('moltworld:queue:added', handleQueueChange); + socket.on('moltworld:queue:updated', handleQueueChange); + socket.on('moltworld:queue:removed', handleQueueChange); + return () => { + socket.off('moltworld:queue:added', handleQueueChange); + socket.off('moltworld:queue:updated', handleQueueChange); + socket.off('moltworld:queue:removed', handleQueueChange); + }; + }, [fetchQueue]); + + // Calculate cooldown end timestamps from rate limit data + useEffect(() => { + if (!rateLimits) { setCooldownEnds({}); return; } + const ends = {}; + const now = Date.now(); + for (const [action, rl] of Object.entries(rateLimits)) { + if (rl?.cooldownRemainingMs > 0) { + ends[action] = now + rl.cooldownRemainingMs; + } + } + setCooldownEnds(ends); + }, [rateLimits]); + + // Tick cooldown timer every second while any cooldown is active + useEffect(() => { + const hasActive = Object.values(cooldownEnds).some(end => end > Date.now()); + if (!hasActive) return; + let refetched = false; + const interval = setInterval(() => { + const stillActive = Object.values(cooldownEnds).some(end => end > Date.now()); + setTick(t => t + 1); + if (!stillActive && !refetched) { + refetched = true; + fetchRateLimits(); + } + }, 1000); + return () => clearInterval(interval); + }, [cooldownEnds, fetchRateLimits]); + + const updateFromJoinResponse = (result) => { + if (result?.agents) setNearby(result.agents); + const msgs = [ + ...(result?.messages || []).map(m => ({ ...m, type: 'say', from: m.fromName })), + ...(result?.thoughts || []).map(t => ({ ...t, type: 'thought', from: t.agentName, message: t.thought })) + ].sort((a, b) => (b.timestamp || 0) - (a.timestamp || 0)); + if (msgs.length) setMessages(msgs); + }; + + const refreshAfterAction = () => { + fetchRateLimits(); + fetchHistory(); + }; + + const handleExplore = async (random = false) => { + if (!accountId) return; + setMoving(true); + const x = random ? undefined : (moveX !== '' ? parseInt(moveX) : undefined); + const y = random ? undefined : (moveY !== '' ? 
parseInt(moveY) : undefined); + + if (wsConnected && !random && x != null && y != null) { + await api.moltworldWsMove(x, y, moveThinking || undefined).catch(() => null); + setMoving(false); + toast.success(`Move sent via WS to (${x}, ${y})`); + refreshAfterAction(); + return; + } + + const result = await api.moltworldExplore( + accountId, agentId, x, y, moveThinking || undefined + ).catch(() => null); + setMoving(false); + if (!result) { fetchRateLimits(); return; } + updateFromJoinResponse(result); + toast.success(`Moved to (${result.x}, ${result.y})`); + fetchStatus(); + refreshAfterAction(); + }; + + const handleThink = async () => { + if (!accountId || !thought) return; + setThinking(true); + + if (wsConnected) { + await api.moltworldWsThink(thought).catch(() => null); + setThinking(false); + toast.success('Thought sent via WS'); + setThought(''); + refreshAfterAction(); + return; + } + + const result = await api.moltworldThink(accountId, thought, agentId).catch(() => null); + setThinking(false); + if (!result) { fetchRateLimits(); return; } + toast.success('Thought sent'); + setThought(''); + refreshAfterAction(); + }; + + const handleBuild = async () => { + if (!accountId || buildX === '' || buildY === '') return; + setBuilding(true); + const result = await api.moltworldBuild( + accountId, agentId, + parseInt(buildX), parseInt(buildY), parseInt(buildZ || '0'), + blockType, buildAction + ).catch(() => null); + setBuilding(false); + if (!result) { fetchRateLimits(); return; } + toast.success(`Block ${buildAction}d at (${buildX}, ${buildY}, ${buildZ})`); + refreshAfterAction(); + }; + + const handleSay = async () => { + if (!accountId || !sayMessage) return; + setSaying(true); + + if (wsConnected && sayTo) { + await api.moltworldWsInteract(sayTo, { message: sayMessage }).catch(() => null); + setSaying(false); + toast.success('DM sent via WS'); + setSayMessage(''); + setSayTo(''); + refreshAfterAction(); + return; + } + + const result = await api.moltworldSay( + accountId, sayMessage, sayTo || undefined, agentId + ).catch(() => null); + setSaying(false); + if (!result) { fetchRateLimits(); return; } + updateFromJoinResponse(result); + toast.success(sayTo ? 'DM sent' : 'Message sent'); + setSayMessage(''); + setSayTo(''); + refreshAfterAction(); + }; + + const handleLoadMoreHistory = () => { + if (history.length === 0) return; + const lastTs = history[history.length - 1].timestamp; + fetchHistory(true, lastTs); + }; + + const handleAddToQueue = async () => { + const params = { ...newActionParams }; + // Convert numeric fields + if (params.x != null) params.x = parseInt(params.x); + if (params.y != null) params.y = parseInt(params.y); + if (params.z != null) params.z = parseInt(params.z); + + await api.moltworldAddToQueue(agentId, newActionType, params).catch(() => null); + setShowAddForm(false); + setNewActionParams({}); + toast.success('Action queued'); + }; + + const handleRemoveFromQueue = async (id) => { + await api.moltworldRemoveFromQueue(id).catch(() => null); + }; + + const getCooldownMs = (action) => { + const end = cooldownEnds[action]; + return end ? Math.max(0, end - Date.now()) : 0; + }; + + const formatCooldown = (ms) => { + const totalSeconds = Math.ceil(ms / 1000); + const minutes = Math.floor(totalSeconds / 60); + const seconds = totalSeconds % 60; + return `${minutes}:${seconds.toString().padStart(2, '0')}`; + }; + + if (loading) { + return
; + } + + if (!accountId) { + return ( +
+

No active Moltworld account

+

Register a Moltworld account on the Overview tab to get started

+
+ ); + } + + const profile = status?.profile; + const bal = status?.balance?.balance || status?.balance; + const displayNearby = presence.length > 0 ? presence : nearby; + + const statusDotColor = { + connected: 'bg-port-success', + connecting: 'bg-port-warning animate-pulse', + reconnecting: 'bg-port-warning animate-pulse', + disconnected: 'bg-gray-600' + }[connectionStatus] || 'bg-gray-600'; + + // Dynamic param fields for add-to-queue form + const renderQueueParamFields = () => { + switch (newActionType) { + case 'mw_explore': + return ( +
+ setNewActionParams(p => ({ ...p, x: e.target.value }))} className="px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + setNewActionParams(p => ({ ...p, y: e.target.value }))} className="px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + setNewActionParams(p => ({ ...p, thinking: e.target.value }))} className="col-span-2 px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> +
+ ); + case 'mw_think': + return ( + setNewActionParams(p => ({ ...p, thought: e.target.value }))} className="w-full px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + ); + case 'mw_build': + return ( +
+ setNewActionParams(p => ({ ...p, x: e.target.value }))} className="px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + setNewActionParams(p => ({ ...p, y: e.target.value }))} className="px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + setNewActionParams(p => ({ ...p, z: e.target.value }))} className="px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + + +
+ ); + case 'mw_say': + return ( +
+ setNewActionParams(p => ({ ...p, message: e.target.value }))} className="w-full px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> + setNewActionParams(p => ({ ...p, sayTo: e.target.value }))} className="w-full px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm" /> +
+ ); + default: + return null; + } + }; + + return ( +
+ {/* Connection Banner */} +
+
+ + WebSocket: + {connectionStatus} +
+
+ {connectionStatus === 'disconnected' ? ( + + ) : ( + + )} +
+
+ + {/* Header: Account Name + Rate Limits */} +
+ Account: {accountName} + {rateLimits && ( +
+ {Object.entries(rateLimits).map(([action, rl]) => { + if (!rl?.cooldownMs) return null; + const cooldownMs = getCooldownMs(action); + const isCooling = cooldownMs > 0; + const colorClass = isCooling + ? 'bg-port-warning/20 text-port-warning animate-pulse' + : 'bg-port-success/20 text-port-success'; + return ( +
+ {isCooling ? `${action}: ${formatCooldown(cooldownMs)}` : `${action}: ready`} +
+ ); + })} +
+ )} +
+ +
+ {/* Left Column: World State */} +
+ {/* Status Card */} +
+
+

World Status

+ +
+ + {!status && !statusLoading && ( +

No status data yet. Click Refresh.

+ )} + + {status && ( +
+
+
+ Status +
+ + {(profile?.inWorld || bal?.isOnline) ? 'Online' : 'Offline'} +
+
+
+ Position +

+ ({profile?.worldState?.x ?? '?'}, {profile?.worldState?.y ?? '?'}) +

+
+
+ SIM Balance +

+ {bal?.sim != null ? Number(bal.sim).toFixed(2) : '?'} SIM +

+
+
+ Earning Rate +

+ {bal?.earningRate ?? '0.1 SIM/hour'} +

+
+ {bal?.totalEarned != null && ( +
+ Total Earned +

{Number(bal.totalEarned).toFixed(2)} SIM

+
+ )} + {bal?.totalOnlineTime != null && ( +
+ Online Time +

{bal.totalOnlineTime}

+
+ )} +
+ {profile?.agent?.name && ( +
+ Agent: {profile.agent.name} {profile.agent?.appearance?.emoji || ''} +
+ )} +
+ )} +
+ + {/* Live Feed */} +
+
+

Live Feed

+ {wsConnected && ( + + + live + + )} + {feedItems.length > 0 && ( + ({feedItems.length}) + )} +
+ {feedItems.length === 0 ? ( +

+ {wsConnected ? 'Waiting for events...' : 'Connect WebSocket to see live events'} +

+ ) : ( +
+ {feedItems.slice(0, 50).map((item) => ( +
+ {EVENT_ICONS[item.eventType] || '📨'} +
+
+ {item.agentName && ( + {item.agentName} + )} + {item.eventType} +
+ {item.content && ( +

{item.content}

+ )} +
+ {formatEventTime(item.timestamp)} +
+ ))} +
+ )} +
+ + {/* Nearby Agents */} +
+

+ Nearby Agents{displayNearby.length > 0 && ({displayNearby.length})} + {presence.length > 0 && wsConnected && ( + (live) + )} +

+ {displayNearby.length === 0 ? ( +

+ No nearby agents. Explore the world to discover others. +

+ ) : ( +
+ {displayNearby.slice(0, 20).map((agent, i) => ( +
+
+ {agent.appearance?.emoji || '🤖'} + {agent.name || 'Unknown'} + {agent.thinking && "{agent.thinking}"} +
+ + ({agent.x ?? '?'}, {agent.y ?? '?'}) + {agent.distance != null && ` - ${Math.round(agent.distance)}m`} + +
+ ))} +
+ )} +
+ + {/* Recent Messages */} +
+

Recent Messages

+ {messages.length === 0 ? ( +

+ No recent messages. Messages expire after 5 minutes. +

+ ) : ( +
+ {messages.map((msg, i) => ( +
+
+ {msg.from || 'Agent'} + {msg.type === 'thought' && (thought)} + {msg.type === 'say' && (say)} +
+

{msg.message || msg.thought}

+
+ ))} +
+ )} +
+ + {/* Activity History */} +
+
+

Activity History

+
+ + +
+
+ {history.length === 0 ? ( +

+ {historyLoading ? 'Loading...' : 'No activity history yet'} +

+ ) : ( +
+ {history.map((entry) => ( +
+ {HISTORY_ACTION_ICONS[entry.action] || '📋'} +
+
+ + {entry.action?.replace('mw_', '')} + + + {entry.status} + + {entry.params?.via === 'ws' && ( + WS + )} +
+

+ {summarizeParams(entry.action, entry.params)} +

+
+ + {formatRelativeTime(entry.timestamp)} + +
+ ))} + {historyHasMore && ( + + )} +
+ )} +
+
+ + {/* Right Column: Actions */} +
+ {/* Action Queue */} +
+
+

Action Queue

+
+ {queue.length > 0 && ( + {queue.length} item{queue.length !== 1 ? 's' : ''} + )} + +
+
+ + {showAddForm && ( +
+ + {renderQueueParamFields()} + +
+ )} + + {queue.length === 0 && !queueLoading ? ( +

No queued actions

+ ) : ( +
+ {queue.map((item) => ( +
+ {HISTORY_ACTION_ICONS[item.actionType] || '📋'} +
+
+ {item.actionType?.replace('mw_', '')} + + {item.status} + +
+

+ {summarizeParams(item.actionType, item.params)} +

+
+ {item.status === 'pending' && ( + + )} +
+ ))} +
+ )} +
+ + {/* Move / Explore */} +
+
+

Move / Explore

+ {wsConnected && (via WS)} +
+
+
+ + setMoveX(e.target.value)} + min={-240} + max={240} + placeholder="X" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" + /> +
+
+ + setMoveY(e.target.value)} + min={-240} + max={240} + placeholder="Y" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" + /> +
+
+ setMoveThinking(e.target.value)} + placeholder="Thinking... (optional)" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white mb-3" + /> +
+ + +
+
+ + {/* Think */} +
+
+

Think

+ {wsConnected && (via WS)} +
+ setThought(e.target.value)} + placeholder="What is this agent thinking?" + maxLength={500} + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white mb-3" + /> + +
+ + {/* Build */} +
+

Build

+
+
+ + setBuildX(e.target.value)} + min={-500} + max={500} + placeholder="X" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" + /> +
+
+ + setBuildY(e.target.value)} + min={-500} + max={500} + placeholder="Y" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" + /> +
+
+ + setBuildZ(e.target.value)} + min={0} + max={100} + placeholder="Z" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" + /> +
+
+
+ + +
+ +
+ + {/* Say */} +
+
+

Say

+ {wsConnected && sayTo && (DM via WS)} +
+ setSayMessage(e.target.value)} + placeholder="Message to nearby agents..." + maxLength={500} + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white mb-2" + /> + setSayTo(e.target.value)} + placeholder="To Agent ID (optional — leave blank for broadcast)" + className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white mb-3" + /> + +
+
+
+
+ ); +} diff --git a/client/src/components/brain/constants.js b/client/src/components/brain/constants.js index 2d15e1c6..b0ee6193 100644 --- a/client/src/components/brain/constants.js +++ b/client/src/components/brain/constants.js @@ -66,6 +66,12 @@ export const PROJECT_STATUS_COLORS = { done: 'bg-gray-500/20 text-gray-400 border-gray-500/30' }; +// Idea status colors +export const IDEA_STATUS_COLORS = { + active: 'bg-yellow-500/20 text-yellow-400 border-yellow-500/30', + done: 'bg-gray-500/20 text-gray-400 border-gray-500/30' +}; + // Admin status colors export const ADMIN_STATUS_COLORS = { open: 'bg-blue-500/20 text-blue-400 border-blue-500/30', diff --git a/client/src/components/brain/tabs/MemoryTab.jsx b/client/src/components/brain/tabs/MemoryTab.jsx index f9ea384c..ca1d6fa5 100644 --- a/client/src/components/brain/tabs/MemoryTab.jsx +++ b/client/src/components/brain/tabs/MemoryTab.jsx @@ -8,7 +8,8 @@ import { X, Save, ChevronDown, - ChevronRight + ChevronRight, + CheckCircle2 } from 'lucide-react'; import toast from 'react-hot-toast'; @@ -16,6 +17,7 @@ import { MEMORY_TABS, DESTINATIONS, PROJECT_STATUS_COLORS, + IDEA_STATUS_COLORS, ADMIN_STATUS_COLORS, formatRelativeTime } from '../constants'; @@ -44,7 +46,7 @@ export default function MemoryTab({ onRefresh }) { data = await api.getBrainProjects(filters).catch(() => []); break; case 'ideas': - data = await api.getBrainIdeas().catch(() => []); + data = await api.getBrainIdeas(filters).catch(() => []); break; case 'admin': data = await api.getBrainAdmin(filters).catch(() => []); @@ -173,6 +175,36 @@ export default function MemoryTab({ onRefresh }) { } }; + const handleMarkDone = async (record) => { + let result; + const update = { status: 'done' }; + switch (activeType) { + case 'projects': + result = await api.updateBrainProject(record.id, update).catch(err => { + toast.error(err.message); + return null; + }); + break; + case 'ideas': + result = await api.updateBrainIdea(record.id, update).catch(err => { + toast.error(err.message); + return null; + }); + break; + case 'admin': + result = await api.updateBrainAdminItem(record.id, update).catch(err => { + toast.error(err.message); + return null; + }); + break; + } + if (result) { + toast.success('Marked as done'); + fetchRecords(); + onRefresh?.(); + } + }; + const startEdit = (record) => { setEditingId(record.id); setEditForm({ ...record }); @@ -255,6 +287,14 @@ export default function MemoryTab({ onRefresh }) { onChange={(e) => setForm({ ...form, title: e.target.value })} className="w-full px-3 py-2 bg-port-bg border border-port-border rounded text-white" /> + -

{record.title}

+
+

{record.title}

+ + {record.status || 'active'} + +

{record.oneLiner}

{record.notes &&

{record.notes}

} @@ -397,6 +442,15 @@ export default function MemoryTab({ onRefresh }) {
+ {(activeType === 'projects' || activeType === 'ideas' || activeType === 'admin') && record.status !== 'done' && ( + + )} - {/* Status filter for projects/admin */} - {(activeType === 'projects' || activeType === 'admin') && ( + {/* Status filter for projects/ideas/admin */} + {(activeType === 'projects' || activeType === 'ideas' || activeType === 'admin') && ( onChange(parseFloat(e.target.value))} - className="w-full h-1 bg-gray-700 rounded-full appearance-none cursor-pointer accent-cyan-500" + className="w-full h-1.5 bg-gray-700 rounded-full appearance-none cursor-pointer accent-cyan-500" style={{ - background: `linear-gradient(to right, #06b6d4 0%, #06b6d4 ${value / max * 100}%, #374151 ${value / max * 100}%, #374151 100%)`, + background: `linear-gradient(to right, #06b6d4 0%, #06b6d4 ${(value - min) / (max - min) * 100}%, #374151 ${(value - min) / (max - min) * 100}%, #374151 100%)`, }} />
); } +function SectionHeader({ title, subtitle }) { + return ( +
+
{title}
+ {subtitle && ( +
{subtitle}
+ )} +
+ ); +} + export default function CitySettingsPanel() { const navigate = useNavigate(); const { settings, updateSetting, resetSettings } = useCitySettingsContext(); @@ -67,38 +80,41 @@ export default function CitySettingsPanel() { return (
-
+
{/* Header */} -
- +
+ SETTINGS
-
+
{/* Quality Preset */}
-
QUALITY PRESET
-
+ +
{Object.keys(QUALITY_PRESETS).map(preset => (
+
+ {/* Music */}
-
MUSIC
+ updateSetting('musicEnabled', v)} + description="Enable ambient synthwave music" /> {settings.musicEnabled && ( updateSetting('musicVolume', v)} + description="Music playback volume" /> )}
{/* Sound Effects */}
-
SOUND FX
+ updateSetting('sfxEnabled', v)} + description="Enable sound effects for interactions" /> {settings.sfxEnabled && ( updateSetting('sfxVolume', v)} + description="Sound effects volume" /> )}
+
+ {/* Visual Effects */}
-
VISUAL FX
+ updateSetting('bloomEnabled', v)} + description="Glowing light bloom around bright surfaces" /> {settings.bloomEnabled && ( updateSetting('bloomStrength', v)} + description="Intensity of the bloom glow effect" /> )} updateSetting('reflectionsEnabled', v)} + description="Wet street reflections and puddles" /> updateSetting('chromaticAberration', v)} + description="Color fringing at screen edges" /> updateSetting('filmGrain', v)} + description="Subtle animated noise overlay" /> updateSetting('colorGrading', v)} + description="Cinematic color correction" /> updateSetting('scanlineOverlay', v)} + description="CRT monitor scanline overlay" />
+
+ {/* Scene Lighting */}
-
SCENE LIGHTING
+ `${v.toFixed(1)}x`} + description="Overall scene ambient light level" /> `${v.toFixed(1)}x`} + description="Brightness of neon lights and building glow" /> -
-
TIME OF DAY
-
+
+
TIME OF DAY
+
{['sunrise', 'noon', 'sunset', 'midnight'].map(tod => (
+
+ {/* Reset */} diff --git a/client/src/components/city/HolographicPanel.jsx b/client/src/components/city/HolographicPanel.jsx index 86a78bbc..78dd76ba 100644 --- a/client/src/components/city/HolographicPanel.jsx +++ b/client/src/components/city/HolographicPanel.jsx @@ -1,17 +1,37 @@ import { Html } from '@react-three/drei'; +const STATUS_ICONS = { + online: '\u25CF', + stopped: '\u25A0', + not_started: '\u25CB', + not_found: '\u25CB', +}; + export default function HolographicPanel({ app, agentCount, position }) { const statusColors = { online: 'border-cyan-500/50 text-cyan-400', - stopped: 'border-amber-500/50 text-amber-400', - not_started: 'border-indigo-500/50 text-indigo-400', - not_found: 'border-indigo-500/50 text-indigo-400', + stopped: 'border-red-500/50 text-red-400', + not_started: 'border-violet-500/50 text-violet-400', + not_found: 'border-violet-500/50 text-violet-400', + }; + + const statusDotColors = { + online: 'text-cyan-400', + stopped: 'text-red-400', + not_started: 'text-violet-400', + not_found: 'text-violet-400', }; const colorClass = app.archived ? 'border-slate-500/50 text-slate-400' : statusColors[app.overallStatus] || statusColors.not_started; + const dotColor = app.archived + ? 'text-slate-500' + : statusDotColors[app.overallStatus] || 'text-violet-400'; + + const processCount = app.processes?.length || 0; + return ( -
-
{app.name}
-
- {app.archived ? 'ARCHIVED' : (app.overallStatus || '').toUpperCase().replace('_', ' ')} - {agentCount > 0 && | {agentCount} AGENT{agentCount > 1 ? 'S' : ''}} +
+
{app.name}
+
+ + {STATUS_ICONS[app.overallStatus] || '\u25CB'} + + {app.archived ? 'ARCHIVED' : (app.overallStatus || '').toUpperCase().replace('_', ' ')} + {processCount > 0 && ( + | {processCount} PROC + )} + {agentCount > 0 && ( + | {agentCount} AGENT{agentCount > 1 ? 'S' : ''} + )}
+
CLICK TO VIEW
); diff --git a/client/src/components/city/cityConstants.js b/client/src/components/city/cityConstants.js index 6bbd2ba8..c0a4bab2 100644 --- a/client/src/components/city/cityConstants.js +++ b/client/src/components/city/cityConstants.js @@ -6,12 +6,12 @@ export const CITY_COLORS = { ambient: '#0d0d2b', building: { online: '#06b6d4', - stopped: '#f59e0b', - not_started: '#6366f1', - not_found: '#6366f1', - archived: '#475569', + stopped: '#ef4444', + not_started: '#8b5cf6', + not_found: '#8b5cf6', + archived: '#64748b', }, - buildingBody: '#10102a', + buildingBody: '#0c0c24', particles: '#06b6d4', stars: '#ffffff', // Neon accent palette for building window/decoration variety diff --git a/client/src/components/cos/CoSCharacter.jsx b/client/src/components/cos/CoSCharacter.jsx index 3666bdd5..d03ec49f 100644 --- a/client/src/components/cos/CoSCharacter.jsx +++ b/client/src/components/cos/CoSCharacter.jsx @@ -4,8 +4,8 @@ export default function CoSCharacter({ state, speaking }) { const stateConfig = AGENT_STATES[state] || AGENT_STATES.sleeping; return ( -
- +
+ diff --git a/client/src/components/cos/CyberCoSAvatar.jsx b/client/src/components/cos/CyberCoSAvatar.jsx index 724e3f47..ef30c52f 100644 --- a/client/src/components/cos/CyberCoSAvatar.jsx +++ b/client/src/components/cos/CyberCoSAvatar.jsx @@ -430,7 +430,7 @@ function Scene({ state, speaking }) { export default function CyberCoSAvatar({ state, speaking }) { return ( -
+
+
+
{ + if (agent.feedback?.rating) setFeedbackState(agent.feedback.rating); + }, [agent.feedback?.rating]); + const [submittingFeedback, setSubmittingFeedback] = useState(false); + const [showFeedbackComment, setShowFeedbackComment] = useState(false); + const [feedbackComment, setFeedbackComment] = useState(''); // Determine if this is a system agent (health check, etc.) const isSystemAgent = agent.taskId?.startsWith('sys-') || agent.id?.startsWith('sys-'); + // Handle feedback submission + const submitFeedback = useCallback(async (rating) => { + if (submittingFeedback) return; + setSubmittingFeedback(true); + + const result = await api.submitCosAgentFeedback(agent.id, { + rating, + comment: feedbackComment || undefined + }).catch(err => { + toast.error(`Failed to submit feedback: ${err.message}`); + return null; + }); + + setSubmittingFeedback(false); + + if (result?.success) { + setFeedbackState(rating); + setShowFeedbackComment(false); + setFeedbackComment(''); + toast.success(`Feedback recorded: ${rating}`); + onFeedbackChange?.(); + } + }, [agent.id, feedbackComment, submittingFeedback, onFeedbackChange]); + // Update duration display for running agents useEffect(() => { if (completed) return; @@ -280,7 +317,7 @@ export default function AgentCard({ agent, onKill, onDelete, onResume, completed Kill )} - {completed && !isSystemAgent && onResume && ( + {completed && onResume && (
)} + + {/* Feedback section - shown for completed non-system agents */} + {completed && !isSystemAgent && ( +
+
+ Was this helpful? +
+ + + {!feedbackState && ( + + )} +
+ {feedbackState && ( + + {feedbackState === 'positive' ? 'Thanks for the feedback!' : 'We\'ll improve'} + + )} +
+ {/* Comment input */} + {showFeedbackComment && !feedbackState && ( +
+ setFeedbackComment(e.target.value)} + placeholder="Optional: add a comment..." + className="flex-1 px-2 py-1 text-sm bg-port-bg border border-port-border rounded text-white placeholder-gray-500 focus:outline-none focus:border-port-accent min-h-[32px]" + maxLength={200} + /> + +
+ )} +
+ )}
{/* Expanded output view */} diff --git a/client/src/components/cos/tabs/AgentsTab.jsx b/client/src/components/cos/tabs/AgentsTab.jsx index 2e7db723..5dcbcd56 100644 --- a/client/src/components/cos/tabs/AgentsTab.jsx +++ b/client/src/components/cos/tabs/AgentsTab.jsx @@ -31,18 +31,19 @@ export default function AgentsTab({ agents, onRefresh, liveOutputs, providers, a setResumingAgent(agent); }; - const handleResumeSubmit = async ({ description, context, model, provider, app }) => { + const handleResumeSubmit = async ({ description, context, model, provider, app, type = 'user' }) => { await api.addCosTask({ description, context, model: model || undefined, provider: provider || undefined, - app: app || undefined + app: app || undefined, + type }).catch(err => { toast.error(err.message); return; }); - toast.success('Created resume task'); + toast.success(`Created ${type === 'internal' ? 'system ' : ''}resume task`); setResumingAgent(null); onRefresh(); }; @@ -150,7 +151,7 @@ export default function AgentsTab({ agents, onRefresh, liveOutputs, providers, a )}
{(searchQuery ? filteredCompletedAgents : filteredCompletedAgents.slice(0, 15)).map(agent => ( - + ))} {!searchQuery && completedAgents.length > 15 && (
@@ -170,6 +171,7 @@ export default function AgentsTab({ agents, onRefresh, liveOutputs, providers, a {resumingAgent && ( 0 ? agent.output.slice(-20).map(o => o.line).join('\n') @@ -39,7 +39,8 @@ export default function ResumeAgentModal({ agent, providers, apps, onSubmit, onC context: fullContext, model: formData.model, provider: formData.provider, - app: formData.app + app: formData.app, + type: taskType }); }; @@ -47,7 +48,9 @@ export default function ResumeAgentModal({ agent, providers, apps, onSubmit, onC
-

Resume Agent Task

+

+ Resume {taskType === 'internal' ? 'System ' : ''}Agent Task +

+ + +
+
+
+ ), + { + id: toastId, + duration: 15000, // 15 seconds - enough time to react but not annoying + style: { + background: '#1a1a1a', + border: '1px solid #2a2a2a', + padding: '12px 16px', + borderRadius: '8px' + } + } + ); + }; + + // Register handler + socket.on('cos:agent:completed', handleAgentCompleted); + + return () => { + socket.off('cos:agent:completed', handleAgentCompleted); + // Don't unsubscribe from cos since other components may use it + }; + }, [submitFeedback]); + + // Clean up old entries periodically (keep last 50) + useEffect(() => { + const cleanup = setInterval(() => { + const entries = Array.from(shownFeedbackFor.current); + if (entries.length > 50) { + shownFeedbackFor.current = new Set(entries.slice(-50)); + } + }, 60000); // Every minute + + return () => clearInterval(cleanup); + }, []); +} diff --git a/client/src/hooks/useMoltworldWs.js b/client/src/hooks/useMoltworldWs.js new file mode 100644 index 00000000..7fb1c6ce --- /dev/null +++ b/client/src/hooks/useMoltworldWs.js @@ -0,0 +1,109 @@ +import { useState, useEffect, useCallback, useRef } from 'react'; +import socket from '../services/socket'; +import * as api from '../services/api'; + +const MAX_FEED_ITEMS = 200; +let idCounter = 0; + +/** + * Hook for Moltworld real-time WebSocket events. + * + * Subscribes to moltworld:* Socket.IO events and maintains: + * - connectionStatus: 'disconnected' | 'connecting' | 'connected' | 'reconnecting' + * - feedItems: ring buffer of last 200 events (newest first) + * - presence: latest agent presence snapshot + * - connect(accountId) / disconnect(): control the server-side WS relay + */ +export default function useMoltworldWs() { + const [connectionStatus, setConnectionStatus] = useState('disconnected'); + const [feedItems, setFeedItems] = useState([]); + const [presence, setPresence] = useState([]); + const feedRef = useRef([]); + + const addFeedItem = useCallback((eventType, data) => { + const item = { + id: ++idCounter, + eventType, + agentName: data.agentName || data.name || data.agentId || '', + content: data.message || data.thought || data.thinking || data.action || '', + timestamp: data.timestamp || Date.now(), + raw: data + }; + feedRef.current = [item, ...feedRef.current].slice(0, MAX_FEED_ITEMS); + setFeedItems([...feedRef.current]); + }, []); + + useEffect(() => { + // Subscribe to agents channel (moltworld events ride on agent subscribers) + socket.emit('agents:subscribe'); + + const handleStatus = (data) => { + setConnectionStatus(data.status || 'disconnected'); + addFeedItem('status', { ...data, agentName: 'System', content: `Connection: ${data.status}` }); + }; + const handleEvent = (data) => { + addFeedItem(data.type || data.event || 'event', data); + }; + const handlePresence = (data) => { + const agents = data.agents || data.nearby || []; + if (agents.length > 0) setPresence(agents); + addFeedItem('presence', { ...data, content: `${agents.length} agents nearby` }); + }; + const handleThinking = (data) => { + addFeedItem('thinking', data); + }; + const handleAction = (data) => { + addFeedItem('action', data); + }; + const handleInteraction = (data) => { + addFeedItem('interaction', data); + }; + const handleNearby = (data) => { + const agents = data.agents || data.nearby || []; + if (agents.length > 0) setPresence(agents); + addFeedItem('nearby', { ...data, content: `${agents.length} agents` }); + }; + + socket.on('moltworld:status', handleStatus); + socket.on('moltworld:event', handleEvent); + socket.on('moltworld:presence', handlePresence); + 
socket.on('moltworld:thinking', handleThinking); + socket.on('moltworld:action', handleAction); + socket.on('moltworld:interaction', handleInteraction); + socket.on('moltworld:nearby', handleNearby); + + // Fetch initial WS status + api.moltworldWsStatus().then(data => { + if (data?.status) setConnectionStatus(data.status); + }).catch(() => {}); + + return () => { + socket.off('moltworld:status', handleStatus); + socket.off('moltworld:event', handleEvent); + socket.off('moltworld:presence', handlePresence); + socket.off('moltworld:thinking', handleThinking); + socket.off('moltworld:action', handleAction); + socket.off('moltworld:interaction', handleInteraction); + socket.off('moltworld:nearby', handleNearby); + }; + }, [addFeedItem]); + + const connect = useCallback(async (accountId) => { + setConnectionStatus('connecting'); + const result = await api.moltworldWsConnect(accountId).catch(() => null); + if (result?.status) setConnectionStatus(result.status); + }, []); + + const disconnect = useCallback(async () => { + await api.moltworldWsDisconnect().catch(() => null); + setConnectionStatus('disconnected'); + }, []); + + return { + connectionStatus, + feedItems, + presence, + connect, + disconnect + }; +} diff --git a/client/src/index.css b/client/src/index.css index 0c922405..854a470c 100644 --- a/client/src/index.css +++ b/client/src/index.css @@ -27,12 +27,23 @@ font-family: 'GeistPixelGrid', 'Courier New', monospace; } +html, body, #root { + width: 100%; + max-width: 100vw; + overflow-x: hidden; +} + body { background-color: #0f0f0f; color: #e5e5e5; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; } +#root { + min-height: 100vh; + min-height: 100dvh; +} + /* Accessibility: Global focus styles for keyboard navigation */ *:focus-visible { outline: 2px solid #3b82f6; diff --git a/client/src/pages/AIProviders.jsx b/client/src/pages/AIProviders.jsx index 3b545872..3d8495ee 100644 --- a/client/src/pages/AIProviders.jsx +++ b/client/src/pages/AIProviders.jsx @@ -18,6 +18,10 @@ export default function AIProviders() { const [apps, setApps] = useState([]); const [activeRun, setActiveRun] = useState(null); const [runOutput, setRunOutput] = useState(''); + const [showSamples, setShowSamples] = useState(false); + const [sampleProviders, setSampleProviders] = useState([]); + const [loadingSamples, setLoadingSamples] = useState(false); + const [addingSample, setAddingSample] = useState({}); useEffect(() => { loadData(); @@ -138,6 +142,32 @@ export default function AIProviders() { } }; + const handleLoadSamples = async () => { + setLoadingSamples(true); + setShowSamples(true); + const result = await api.getSampleProviders().catch(() => ({ providers: [] })); + setSampleProviders(result.providers || []); + setLoadingSamples(false); + }; + + const handleAddSample = async (provider) => { + setAddingSample(prev => ({ ...prev, [provider.id]: true })); + await api.createProvider(provider); + setSampleProviders(prev => prev.filter(p => p.id !== provider.id)); + setAddingSample(prev => ({ ...prev, [provider.id]: false })); + loadData(); + toast.success(`Added ${provider.name}`); + }; + + const handleAddAllSamples = async () => { + for (const provider of sampleProviders) { + await api.createProvider(provider); + } + setSampleProviders([]); + loadData(); + toast.success(`Added ${sampleProviders.length} providers`); + }; + if (loading) { return (
@@ -157,6 +187,12 @@ export default function AIProviders() { > {showRunPanel ? 'Hide Runner' : 'Run Prompt'} +
+ {/* Sample Providers Panel */} + {showSamples && ( +
+
+

Sample Providers

+
+ {sampleProviders.length > 1 && ( + + )} + +
+
+ + {loadingSamples ? ( +
Loading sample providers...
+ ) : sampleProviders.length === 0 ? ( +
+ All sample providers are already in your configuration. +
+ ) : ( +
+ {sampleProviders.map(provider => ( +
+
+
+

{provider.name}

+ + {provider.type.toUpperCase()} + + {!provider.enabled && ( + + DISABLED + + )} +
+
+ {provider.type === 'cli' && ( +

Command: {provider.command} {provider.args?.join(' ')}

+ )} + {provider.type === 'api' && ( +

Endpoint: {provider.endpoint}

+ )} + {provider.models?.length > 0 && ( +

Models: {provider.models.slice(0, 3).join(', ')}{provider.models.length > 3 ? ` +${provider.models.length - 3}` : ''}

+ )} + {provider.envVars && Object.keys(provider.envVars).length > 0 && ( +

Env: {Object.entries(provider.envVars).map(([k, v]) => `${k}=${v}`).join(', ')}

+ )} +
+
+ +
+ ))} +
+ )} +
+ )} + {/* Run Panel */} {showRunPanel && (
@@ -284,6 +399,13 @@ export default function AIProviders() { Fallback: {providers.find(p => p.id === provider.fallbackProvider)?.name || provider.fallbackProvider}

)} + {provider.envVars && Object.keys(provider.envVars).length > 0 && ( +

+ Env: {Object.entries(provider.envVars).map(([k, v]) => ( + {k}={v} + ))} +

+ )}
{testResults[provider.id] && !testResults[provider.id].testing && ( @@ -421,9 +543,13 @@ function ProviderForm({ provider, onClose, onSave, allProviders = [] }) { heavyModel: provider?.heavyModel || '', fallbackProvider: provider?.fallbackProvider || '', timeout: provider?.timeout || 300000, - enabled: provider?.enabled !== false + enabled: provider?.enabled !== false, + envVars: provider?.envVars || {} }); + const [newEnvKey, setNewEnvKey] = useState(''); + const [newEnvValue, setNewEnvValue] = useState(''); + const availableModels = formData.models || []; // Filter out current provider from fallback options (treat undefined enabled as enabled) @@ -702,6 +828,75 @@ function ProviderForm({ provider, onClose, onSave, allProviders = [] }) {

+ {/* Environment Variables */} +
+

Environment Variables

+ {Object.entries(formData.envVars).length > 0 && ( +
+ {Object.entries(formData.envVars).map(([key, value]) => ( +
+ {key} + setFormData(prev => ({ + ...prev, + envVars: { ...prev.envVars, [key]: e.target.value } + }))} + className="flex-1 min-w-0 px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm focus:border-port-accent focus:outline-none" + /> + +
+ ))} +
+ )} +
+ setNewEnvKey(e.target.value.toUpperCase())} + placeholder="KEY" + className="w-1/3 px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm focus:border-port-accent focus:outline-none font-mono" + /> + setNewEnvValue(e.target.value)} + placeholder="value" + className="flex-1 px-2 py-1.5 bg-port-bg border border-port-border rounded text-white text-sm focus:border-port-accent focus:outline-none" + /> + +
+

+ Environment variables passed to the CLI process (e.g., CLAUDE_CODE_USE_BEDROCK=1, AWS_PROFILE). +

+
+
); } diff --git a/client/src/pages/ChiefOfStaff.jsx b/client/src/pages/ChiefOfStaff.jsx index bd59d07b..68dcf0f3 100644 --- a/client/src/pages/ChiefOfStaff.jsx +++ b/client/src/pages/ChiefOfStaff.jsx @@ -1,9 +1,10 @@ -import { useState, useEffect, useCallback, useMemo } from 'react'; +import { useState, useEffect, useCallback, useMemo, useRef } from 'react'; import { useParams, useNavigate } from 'react-router-dom'; import { useSocket } from '../hooks/useSocket'; import * as api from '../services/api'; -import { Play, Square, Clock, CheckCircle, AlertCircle, Cpu, ChevronDown, ChevronUp, Brain } from 'lucide-react'; +import { Play, Square, Clock, CheckCircle, AlertCircle, Cpu, ChevronDown, ChevronUp, ChevronLeft, ChevronRight, Brain } from 'lucide-react'; import toast from 'react-hot-toast'; +import BrailleSpinner from '../components/BrailleSpinner'; // Import from modular components import { @@ -58,6 +59,9 @@ export default function ChiefOfStaff() { const [agentPanelCollapsed, setAgentPanelCollapsed] = useState(false); const [activeAgentMeta, setActiveAgentMeta] = useState(null); const [learningSummary, setLearningSummary] = useState(null); + const [canScrollLeft, setCanScrollLeft] = useState(false); + const [canScrollRight, setCanScrollRight] = useState(false); + const tabsRef = useRef(null); const socket = useSocket(); // Derive avatar style from server config, with optional dynamic override @@ -325,16 +329,38 @@ export default function ChiefOfStaff() { [tasks.user?.grouped?.pending?.length, tasks.cos?.grouped?.pending?.length] ); + // Check if tabs can scroll left/right + const checkTabsScroll = useCallback(() => { + const el = tabsRef.current; + if (!el) return; + setCanScrollLeft(el.scrollLeft > 0); + setCanScrollRight(el.scrollLeft < el.scrollWidth - el.clientWidth - 1); + }, []); + + // Update scroll state on mount and resize + useEffect(() => { + checkTabsScroll(); + window.addEventListener('resize', checkTabsScroll); + return () => window.removeEventListener('resize', checkTabsScroll); + }, [checkTabsScroll]); + + const scrollTabs = useCallback((direction) => { + const el = tabsRef.current; + if (!el) return; + const scrollAmount = 200; + el.scrollBy({ left: direction === 'left' ? -scrollAmount : scrollAmount, behavior: 'smooth' }); + }, []); + if (loading) { return (
-
Loading...
+
); } return ( -
+
{/* Agent Panel */} {avatarStyle === 'ascii' ? ( ) : ( -
+
{/* Mobile Collapse Toggle Header */} - ); - })} + {/* Tabs - scrollable with arrow navigation */} +
+ {/* Left scroll button */} + {canScrollLeft && ( + + )} + {/* Right scroll button */} + {canScrollRight && ( + + )} +
+ {TABS.map(tabItem => { + const Icon = tabItem.icon; + const isSelected = activeTab === tabItem.id; + return ( + + ); + })} +
{/* Tab Content */} diff --git a/client/src/pages/CyberCity.jsx b/client/src/pages/CyberCity.jsx index 16839429..5c3afdd4 100644 --- a/client/src/pages/CyberCity.jsx +++ b/client/src/pages/CyberCity.jsx @@ -31,14 +31,24 @@ function CyberCityInner() { }, []); const handleBuildingClick = useCallback((app) => { - navigate('/apps'); + if (app?.id) { + navigate(`/apps/${app.id}`); + } else { + navigate('/apps'); + } }, [navigate]); if (loading) { return ( -
-
- INITIALIZING CYBERCITY... +
+
+ INITIALIZING CYBERCITY +
+
+
+
+
+ LOADING SYSTEMS...
); diff --git a/client/src/pages/Dashboard.jsx b/client/src/pages/Dashboard.jsx index 90ce37a9..e22e9c53 100644 --- a/client/src/pages/Dashboard.jsx +++ b/client/src/pages/Dashboard.jsx @@ -1,7 +1,10 @@ import { useState, useEffect, useCallback, useMemo } from 'react'; import { Link } from 'react-router-dom'; import AppTile from '../components/AppTile'; +import BrailleSpinner from '../components/BrailleSpinner'; import CosDashboardWidget from '../components/CosDashboardWidget'; +import GoalProgressWidget from '../components/GoalProgressWidget'; +import UpcomingTasksWidget from '../components/UpcomingTasksWidget'; import * as api from '../services/api'; import socket from '../services/socket'; @@ -57,7 +60,7 @@ export default function Dashboard() { if (loading) { return (
-
Loading...
+
); } @@ -91,6 +94,16 @@ export default function Dashboard() {
+ {/* Goal Progress Widget */} +
+ +
+ + {/* Upcoming Tasks Widget */} +
+ +
+ {/* App Grid */} {apps.length === 0 ? (
diff --git a/client/src/pages/DevTools.jsx b/client/src/pages/DevTools.jsx index c30038fe..085c0672 100644 --- a/client/src/pages/DevTools.jsx +++ b/client/src/pages/DevTools.jsx @@ -6,6 +6,7 @@ import * as api from '../services/api'; import socket from '../services/socket'; import { formatTime, formatRuntime } from '../utils/formatters'; import { processScreenshotUploads } from '../utils/fileUpload'; +import BrailleSpinner from '../components/BrailleSpinner'; export function HistoryPage() { const [history, setHistory] = useState([]); @@ -66,7 +67,7 @@ export function HistoryPage() { }; if (loading) { - return
Loading history...
; + return
; } return ( @@ -428,7 +429,7 @@ export function RunsHistoryPage() { }; if (loading) { - return
Loading runs history...
; + return
; } const failedCount = runs.filter(r => r.success === false).length; @@ -1417,7 +1418,7 @@ export function ProcessesPage() { }; if (loading) { - return
Loading processes...
; + return
; } return ( @@ -1901,7 +1902,7 @@ export function GitPage() {
{loading ? ( -
Loading...
+
) : gitInfo && gitInfo.isRepo ? (
{/* Branch Comparison / Release Status */} @@ -2153,7 +2154,7 @@ export function UsagePage() { }; if (loading) { - return
Loading usage data...
; + return
; } if (!usage) { diff --git a/client/src/pages/PromptManager.jsx b/client/src/pages/PromptManager.jsx index d02b7ba5..8035847d 100644 --- a/client/src/pages/PromptManager.jsx +++ b/client/src/pages/PromptManager.jsx @@ -1,5 +1,6 @@ import { useState, useEffect } from 'react'; import { FileText, Variable, RefreshCw, Save, Plus, Trash2, Eye, Briefcase } from 'lucide-react'; +import BrailleSpinner from '../components/BrailleSpinner'; export default function PromptManager() { const [tab, setTab] = useState('stages'); @@ -233,7 +234,7 @@ export default function PromptManager() { }; if (loading) { - return
Loading...
; + return
; } return ( diff --git a/client/src/pages/Uploads.jsx b/client/src/pages/Uploads.jsx index 3b234b47..dffa31e0 100644 --- a/client/src/pages/Uploads.jsx +++ b/client/src/pages/Uploads.jsx @@ -1,6 +1,7 @@ import { useState, useEffect, useCallback, useRef } from 'react'; import { Upload, Trash2, Download, FileText, Image, File, FolderOpen, RefreshCw } from 'lucide-react'; import toast from 'react-hot-toast'; +import BrailleSpinner from '../components/BrailleSpinner'; import * as api from '../services/api'; // File type icons based on MIME type @@ -137,7 +138,7 @@ export default function Uploads() { if (loading) { return (
-
Loading...
+
); } diff --git a/client/src/services/api.js b/client/src/services/api.js index bc035b0e..0688c111 100644 --- a/client/src/services/api.js +++ b/client/src/services/api.js @@ -3,13 +3,14 @@ import toast from 'react-hot-toast'; const API_BASE = '/api'; async function request(endpoint, options = {}) { + const { silent, ...fetchOptions } = options; const url = `${API_BASE}${endpoint}`; const config = { headers: { 'Content-Type': 'application/json', - ...options.headers + ...fetchOptions.headers }, - ...options + ...fetchOptions }; const response = await fetch(url, config); @@ -17,11 +18,13 @@ async function request(endpoint, options = {}) { if (!response.ok) { const error = await response.json().catch(() => ({ error: 'Request failed' })); const errorMessage = error.error || `HTTP ${response.status}`; - // Platform unavailability is a warning, not an error - if (error.code === 'PLATFORM_UNAVAILABLE') { - toast(errorMessage, { icon: '⚠️' }); - } else { - toast.error(errorMessage); + if (!silent) { + // Platform unavailability is a warning, not an error + if (error.code === 'PLATFORM_UNAVAILABLE') { + toast(errorMessage, { icon: '⚠️' }); + } else { + toast.error(errorMessage); + } } throw new Error(errorMessage); } @@ -133,6 +136,7 @@ export const updateProvider = (id, data) => request(`/providers/${id}`, { body: JSON.stringify(data) }); export const deleteProvider = (id) => request(`/providers/${id}`, { method: 'DELETE' }); +export const getSampleProviders = () => request('/providers/samples'); export const testProvider = (id) => request(`/providers/${id}/test`, { method: 'POST' }); export const refreshProviderModels = (id) => request(`/providers/${id}/refresh-models`, { method: 'POST' }); @@ -375,6 +379,81 @@ export const checkAgentPosts = (agentId, accountId, days, maxReplies, maxUpvotes body: JSON.stringify({ agentId, accountId, days, maxReplies, maxUpvotes }) }); +// Moltworld Tools +export const moltworldJoin = (accountId, x, y, thinking, say, sayTo, agentId) => + request('/agents/tools/moltworld/join', { + method: 'POST', + body: JSON.stringify({ accountId, agentId, x, y, thinking, say, sayTo }) + }); +export const moltworldBuild = (accountId, agentId, x, y, z, type, action) => + request('/agents/tools/moltworld/build', { + method: 'POST', + body: JSON.stringify({ accountId, agentId, x, y, z, type, action }) + }); +export const moltworldExplore = (accountId, agentId, x, y, thinking) => + request('/agents/tools/moltworld/explore', { + method: 'POST', + body: JSON.stringify({ accountId, agentId, x, y, thinking }) + }); +export const moltworldStatus = (accountId) => + request(`/agents/tools/moltworld/status?accountId=${accountId}`); +export const moltworldBalance = (accountId) => + request(`/agents/tools/moltworld/balance?accountId=${accountId}`); +export const moltworldRateLimits = (accountId) => + request(`/agents/tools/moltworld/rate-limits?accountId=${accountId}`); +export const moltworldThink = (accountId, thought, agentId) => + request('/agents/tools/moltworld/think', { + method: 'POST', + body: JSON.stringify({ accountId, agentId, thought }) + }); +export const moltworldSay = (accountId, message, sayTo, agentId) => + request('/agents/tools/moltworld/say', { + method: 'POST', + body: JSON.stringify({ accountId, agentId, message, ...(sayTo ? 
{ sayTo } : {}) }) + }); + +// Moltworld Action Queue +export const moltworldGetQueue = (agentId) => + request(`/agents/tools/moltworld/queue/${agentId}`); +export const moltworldAddToQueue = (agentId, actionType, params, scheduledFor) => + request('/agents/tools/moltworld/queue', { + method: 'POST', + body: JSON.stringify({ agentId, actionType, params, scheduledFor }) + }); +export const moltworldRemoveFromQueue = (id) => + request(`/agents/tools/moltworld/queue/${id}`, { method: 'DELETE' }); + +// Moltworld WebSocket Relay +export const moltworldWsConnect = (accountId) => + request('/agents/tools/moltworld/ws/connect', { + method: 'POST', + body: JSON.stringify({ accountId }) + }); +export const moltworldWsDisconnect = () => + request('/agents/tools/moltworld/ws/disconnect', { method: 'POST' }); +export const moltworldWsStatus = () => + request('/agents/tools/moltworld/ws/status'); +export const moltworldWsMove = (x, y, thought) => + request('/agents/tools/moltworld/ws/move', { + method: 'POST', + body: JSON.stringify({ x, y, ...(thought ? { thought } : {}) }) + }); +export const moltworldWsThink = (thought) => + request('/agents/tools/moltworld/ws/think', { + method: 'POST', + body: JSON.stringify({ thought }) + }); +export const moltworldWsNearby = (radius) => + request('/agents/tools/moltworld/ws/nearby', { + method: 'POST', + body: JSON.stringify({ ...(radius ? { radius } : {}) }) + }); +export const moltworldWsInteract = (to, payload) => + request('/agents/tools/moltworld/ws/interact', { + method: 'POST', + body: JSON.stringify({ to, payload }) + }); + // Agent Drafts export const getAgentDrafts = (agentId) => request(`/agents/tools/drafts?agentId=${agentId}`); export const createAgentDraft = (data) => request('/agents/tools/drafts', { @@ -458,6 +537,11 @@ export const killCosAgent = (id) => request(`/cos/agents/${id}/kill`, { method: export const getCosAgentStats = (id) => request(`/cos/agents/${id}/stats`); export const deleteCosAgent = (id) => request(`/cos/agents/${id}`, { method: 'DELETE' }); export const clearCompletedCosAgents = () => request('/cos/agents/completed', { method: 'DELETE' }); +export const submitCosAgentFeedback = (id, feedback) => request(`/cos/agents/${id}/feedback`, { + method: 'POST', + body: JSON.stringify(feedback) +}); +export const getCosFeedbackStats = () => request('/cos/feedback/stats'); export const getCosReports = () => request('/cos/reports'); export const getCosTodayReport = () => request('/cos/reports/today'); export const getCosReport = (date) => request(`/cos/reports/${date}`); @@ -476,7 +560,7 @@ export const getCosLearningDurations = () => request('/cos/learning/durations'); export const getCosLearningSkipped = () => request('/cos/learning/skipped'); export const getCosLearningPerformance = () => request('/cos/learning/performance'); export const getCosLearningRouting = () => request('/cos/learning/routing'); -export const getCosLearningSummary = () => request('/cos/learning/summary'); +export const getCosLearningSummary = (options) => request('/cos/learning/summary', options); export const backfillCosLearning = () => request('/cos/learning/backfill', { method: 'POST' }); export const resetCosTaskTypeLearning = (taskType) => request(`/cos/learning/reset/${encodeURIComponent(taskType)}`, { method: 'POST' }); @@ -537,11 +621,15 @@ export const getCosProductivity = () => request('/cos/productivity'); export const getCosProductivitySummary = () => request('/cos/productivity/summary'); export const recalculateCosProductivity = () => 
request('/cos/productivity/recalculate', { method: 'POST' }); export const getCosProductivityTrends = (days = 30) => request(`/cos/productivity/trends?days=${days}`); -export const getCosQuickSummary = () => request('/cos/quick-summary'); -export const getCosRecentTasks = (limit = 10) => request(`/cos/recent-tasks?limit=${limit}`); +export const getCosActivityCalendar = (weeks = 12, options) => request(`/cos/productivity/calendar?weeks=${weeks}`, options); +export const getCosQuickSummary = (options) => request('/cos/quick-summary', options); +export const getCosRecentTasks = (limit = 10, options) => request(`/cos/recent-tasks?limit=${limit}`, options); export const getCosActionableInsights = () => request('/cos/actionable-insights'); +export const getCosGoalProgress = () => request('/cos/goal-progress'); +export const getCosGoalProgressSummary = (options) => request('/cos/goal-progress/summary', options); // Task Schedule (Configurable Intervals) +export const getCosUpcomingTasks = (limit = 10) => request(`/cos/upcoming?limit=${limit}`); export const getCosSchedule = () => request('/cos/schedule'); export const getCosScheduleIntervalTypes = () => request('/cos/schedule/interval-types'); export const getCosScheduleDueTasks = () => request('/cos/schedule/due'); @@ -748,7 +836,11 @@ export const updateBrainProject = (id, data) => request(`/brain/projects/${id}`, export const deleteBrainProject = (id) => request(`/brain/projects/${id}`, { method: 'DELETE' }); // Brain - Ideas -export const getBrainIdeas = () => request('/brain/ideas'); +export const getBrainIdeas = (filters) => { + const params = new URLSearchParams(); + if (filters?.status) params.set('status', filters.status); + return request(`/brain/ideas?${params}`); +}; export const getBrainIdea = (id) => request(`/brain/ideas/${id}`); export const createBrainIdea = (data) => request('/brain/ideas', { method: 'POST', diff --git a/data.sample/providers.json b/data.sample/providers.json index c0bdcc8b..19de9c1a 100644 --- a/data.sample/providers.json +++ b/data.sample/providers.json @@ -16,6 +16,24 @@ "enabled": true, "envVars": {} }, + "claude-code-bedrock": { + "id": "claude-code-bedrock", + "name": "Claude Code CLI: Bedrock", + "type": "cli", + "command": "claude", + "args": ["--print"], + "models": ["us.anthropic.claude-sonnet-4-5-20250929-v1:0", "global.anthropic.claude-opus-4-5-20251101-v1:0", "us.anthropic.claude-opus-4-6-v1:0"], + "defaultModel": "us.anthropic.claude-opus-4-6-v1:0", + "lightModel": "us.anthropic.claude-sonnet-4-5-20250929-v1:0", + "mediumModel": "global.anthropic.claude-opus-4-5-20251101-v1:0", + "heavyModel": "us.anthropic.claude-opus-4-6-v1:0", + "timeout": 300000, + "enabled": false, + "envVars": { + "CLAUDE_CODE_USE_BEDROCK": "1", + "AWS_BEARER_TOKEN_BEDROCK": "" + } + }, "codex": { "id": "codex", "name": "Codex CLI", diff --git a/docs/API.md b/docs/API.md index 18a9d8a1..9106fb3e 100644 --- a/docs/API.md +++ b/docs/API.md @@ -376,6 +376,55 @@ PortOS is designed for personal/developer use on trusted networks. 
It implements | GET | `/media/video` | Get video stream | | GET | `/media/audio` | Get audio stream | +### Browser Management + +| Method | Endpoint | Description | +|--------|----------|-------------| +| GET | `/browser` | Get browser status | +| GET | `/browser/config` | Get browser configuration | +| PUT | `/browser/config` | Update browser configuration | +| POST | `/browser/launch` | Launch browser instance | +| POST | `/browser/stop` | Stop browser instance | +| POST | `/browser/restart` | Restart browser instance | +| POST | `/browser/navigate` | Navigate browser to URL | +| GET | `/browser/health` | Get browser health status | +| GET | `/browser/process` | Get browser process info | +| GET | `/browser/pages` | Get open browser pages | +| GET | `/browser/version` | Get browser version info | +| GET | `/browser/logs` | Get browser logs | + +### Digital Twin Genome + +| Method | Endpoint | Description | +|--------|----------|-------------| +| GET | `/digital-twin/genome` | Get genome summary | +| POST | `/digital-twin/genome/upload` | Upload 23andMe genome file | +| POST | `/digital-twin/genome/scan` | Scan curated SNP markers | +| POST | `/digital-twin/genome/search` | Search SNP by rsid | +| GET | `/digital-twin/genome/markers` | Get scanned markers | +| GET | `/digital-twin/genome/markers/:rsid` | Get single marker details | +| PUT | `/digital-twin/genome/markers/:rsid/notes` | Update marker notes | +| POST | `/digital-twin/genome/markers/:rsid/save` | Save marker to genome.json | +| DELETE | `/digital-twin/genome/markers/:rsid` | Remove saved marker | +| GET | `/digital-twin/genome/categories` | Get marker categories | +| GET | `/digital-twin/genome/clinvar/:rsid` | Lookup ClinVar data for rsid | +| GET | `/digital-twin/genome/epigenetic` | Get epigenetic interventions | +| POST | `/digital-twin/genome/epigenetic` | Add epigenetic intervention | +| PUT | `/digital-twin/genome/epigenetic/:id` | Update intervention | +| DELETE | `/digital-twin/genome/epigenetic/:id` | Delete intervention | +| POST | `/digital-twin/genome/epigenetic/:id/log` | Log intervention entry | + +### Moltworld Agent Tools + +| Method | Endpoint | Description | +|--------|----------|-------------| +| POST | `/agents/tools/moltworld/join` | Join/move agent in world | +| POST | `/agents/tools/moltworld/explore` | Get nearby entities | +| POST | `/agents/tools/moltworld/build` | Place/remove blocks | +| POST | `/agents/tools/moltworld/think` | Display thinking bubble | +| POST | `/agents/tools/moltworld/say` | Send chat message | +| GET | `/agents/tools/moltworld/status` | Get world status | + ## WebSocket Events Connect to Socket.IO at `http://localhost:5554`. diff --git a/docs/research/kalshibot-health-check-2026-02-17.md b/docs/research/kalshibot-health-check-2026-02-17.md new file mode 100644 index 00000000..4e39e76d --- /dev/null +++ b/docs/research/kalshibot-health-check-2026-02-17.md @@ -0,0 +1,170 @@ +# Kalshibot Health Check Analysis — 2026-02-17 + +## Summary + +**Status: DEGRADED** — 0% win rate across 3 trades, -$148.14 total loss on 2026-02-16. All 3 live trades settled at $0 (complete loss of cost basis). Shadow gamma-scalper posted +$46 on 1 trade (100% win rate). Current balance: $1,024.51. 
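+
+The per-trade results detailed below reconcile with the headline figure: (-$42.77) + (-$52.79) + (-$52.58) = -$148.14 across the three live trades.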
+ +--- + +## Trade-by-Trade Analysis + +### Trade 1: Settlement Sniper — KXBTC-26FEB1611-B67375 (-$42.77) + +- **Ticker**: B67375 bracket ($67,250-$67,500) +- **Side**: YES (betting BTC settles in this bracket) +- **Entry**: 200 contracts @ 21c ($42.77 + $1.69 fee) at 15:54 UTC +- **Settlement**: YES = $0 at 16:00 UTC (BTC was NOT in this bracket) +- **Loss**: -$42.77 (100% of cost basis, 4.2% of balance) + +**Root cause**: The model estimated >33% fair probability for this bracket (21c + 12% edge). With 200 contracts (the configured max), Kelly sizing put $42 at risk on a single binary outcome. BTC settled outside this range, zeroing the position. + +**Key issue**: `settlementRideThreshold: 0.40` may have prevented the 60s exit window from triggering. If the model still showed 40%+ edge at t-60s, the position rode to $0 instead of exiting with a partial loss. + +### Trade 2: Coinbase Fair Value — KXBTC-26FEB1611-B67625 (-$52.79) + +- **Ticker**: B67625 bracket ($67,500-$67,750) +- **Side**: NO (betting BTC does NOT settle in this bracket) +- **Entry**: 186 contracts @ 28c ($52.79 + $2.57 fee) at 15:56 UTC +- **Settlement**: NO = $0 at 16:00 UTC (BTC WAS in this bracket — NO bet lost) +- **Loss**: -$52.79 (100% of cost basis, 5.2% of balance) + +**Root cause**: The strategy used a lowered `edgeThreshold: 0.20` (default is 0.25) and wider `maxSecondsToSettlement: 300` (default is 180). Entry at 3m35s before settlement with a 20% edge threshold allowed a signal that wouldn't have passed at the default 25% threshold. The NO side was wrong — BTC landed in this bracket. + +**Position sizing note**: 186 contracts @ 28c = $52, exceeding the 3% `maxBetPct` of ~$30. The `calculatePositionSize` method may not be correctly capping by `maxBetPct`. + +### Trade 3: Coinbase Fair Value — KXBTC-26FEB1612-B67625 (-$52.58) + +- **Ticker**: B67625 bracket ($67,500-$67,750), next hour +- **Side**: YES (betting BTC settles in this bracket) +- **Entry**: 141 contracts across 3 fills @ 37-38c ($52.58 + $2.32 fee) at 16:56 UTC +- **Settlement**: YES = $0 at 17:00 UTC (BTC was NOT in this bracket) +- **Loss**: -$52.58 (100% of cost basis, 5.1% of balance) + +**Root cause**: Same bracket, opposite side, next hour. BTC moved away from $67,500-$67,750 between 16:00 and 17:00. Higher entry price (37-38c) indicates greater model confidence, but the thesis was still wrong. + +### Shadow Trade: Gamma Scalper — KXBTC-26FEB1612-B67875 (+$46.00) + +- **Ticker**: B67875 bracket ($67,750-$68,000) +- **Side**: NO (betting BTC does NOT settle in this bracket) +- **Entry**: 50 contracts @ 8c ($4.00 + $0.26 fee) at 16:57 UTC +- **Settlement**: NO = $1.00 at 17:00 UTC ($50 proceeds, +$46 profit) +- **Edge reported**: 77.1% + +**Why it outperformed**: +1. Tiny risk: $4 total cost vs $42-52 for live strategies +2. Asymmetric payoff: 8c entry for $1 payout = 12.5x return +3. Strong signal: 77.1% edge vs 12-20% threshold for live strategies +4. Correct thesis: BTC was not in the $67,750-$68,000 range + +--- + +## Systemic Issues Identified + +### 1. Position Sizing Too Aggressive for Binary Bracket Outcomes + +All 3 live trades risked $42-52 each (4-5% of balance). Bracket markets settle at $0 or $1 — there's no partial recovery. Current `maxBetPct` settings (5% sniper, 3% fair-value) allow catastrophic per-trade losses. + +### 2. 
Coinbase Fair Value Config Deviates from Safer Defaults + +| Parameter | Current | Default | Risk Impact | +|-----------|---------|---------|-------------| +| `edgeThreshold` | 0.20 | 0.25 | Allows noisier signals | +| `maxSecondsToSettlement` | 300 | 180 | Enters too early, less certain | +| `exitEdgeThreshold` | 0.08 | 0.10 | Holds losing positions longer | +| `maxPositions` | 3 | 2 | More concurrent risk | + +### 3. No Per-Window Exposure Cap + +Trades 1 and 2 both targeted the 16:00 UTC settlement window. Combined exposure: $95 (9.3% of balance) on a single 15-minute interval. No mechanism caps aggregate risk per settlement window. + +### 4. Settlement Ride Exception May Amplify Losses + +The sniper's `settlementRideThreshold: 0.40` can override the forced exit at t-60s. In bracket markets where the probability model can be persistently wrong (model shows 40% edge but the bracket misses), this turns a possible small-loss exit into a guaranteed 100% loss. + +### 5. Gamma-Scalper Live Execution Gap (Root Cause Confirmed) + +Gamma-scalper is `enabled: true` in config but was blocked from live execution by the **one-position-per-settlement-window** rule in `simulation-engine.js` (lines 773-798). The engine evaluates strategies in config order: settlement-sniper → coinbase-fair-value → momentum-rider → gamma-scalper. By the time gamma-scalper generated its B67875 signal at 16:57 UTC, the coinbase-fair-value strategy had already placed a position (B67625 YES) in the 17:00 UTC settlement window, triggering the cross-position conflict check. + +**Code path**: `simulation-engine.js:773-798` — when a buy signal arrives, the engine checks if any existing position or pending reservation shares the same `close_time`. If so, the signal is rejected with `"settlement window conflict"`. Since gamma-scalper evaluates last in the strategy loop (`simulation-engine.js:680`), it always loses to earlier strategies. + +**Why the shadow trade succeeded**: Shadow evaluation (`simulation-engine.js:863-925`) runs against `shadowState.positions`, which is separate from live positions. The shadow state had no positions in the 17:00 window, so the gamma-scalper signal passed. + +**Fix required**: Strategy evaluation order should prioritize lower-risk strategies (gamma-scalper at $4/trade) over higher-risk ones ($50/trade), or the engine should collect all signals first and rank them before executing. + +--- + +## Recommended Parameter Changes + +### Immediate (config.json changes only) + +```json +{ + "strategies": { + "settlement-sniper": { + "params": { + "maxBetPct": 0.03, + "maxContracts": 100, + "settlementRideThreshold": 1.0 + } + }, + "coinbase-fair-value": { + "params": { + "edgeThreshold": 0.25, + "exitEdgeThreshold": 0.10, + "maxSecondsToSettlement": 180, + "maxPositions": 2 + } + }, + "gamma-scalper": { + "params": { + "maxPositions": 3 + } + } + } +} +``` + +**Rationale per change**: + +1. **sniper `maxBetPct` 0.05 -> 0.03**: Cap single-trade risk at 3%. Trade 1 would have risked ~$21 instead of $42. +2. **sniper `maxContracts` 200 -> 100**: Hard cap on position size. Combined with lower `maxBetPct`, prevents outsized bracket bets. +3. **sniper `settlementRideThreshold` 0.40 -> 1.0**: Effectively disables settlement riding. Forces positions to exit at t-60s instead of riding to $0. Can be re-enabled after more shadow testing validates the feature. +4. **fair-value `edgeThreshold` 0.20 -> 0.25**: Restore default. Requires 25% divergence before entry, filtering out Trade 2's 20% signal. +5. 
**fair-value `exitEdgeThreshold` 0.08 -> 0.10**: Exit sooner when thesis weakens. +6. **fair-value `maxSecondsToSettlement` 300 -> 180**: Restore default. Prevents entries at 3m+ before settlement where vol estimates are noisier. +7. **fair-value `maxPositions` 3 -> 2**: Reduce concurrent risk exposure. +8. **gamma-scalper `maxPositions` 2 -> 3**: Give the proven low-risk strategy more room to deploy. + +### Post-Analysis Config Audit (2026-02-17) + +After the initial health check, some parameters were applied to `config.json` but several were applied incorrectly or missed: + +| Parameter | Health Check Target | Current Config | Status | +|-----------|-------------------|----------------|--------| +| sniper `maxBetPct` | 0.03 | 0.03 | Applied | +| sniper `maxContracts` | 100 | 100 | Applied | +| sniper `settlementRideThreshold` | 1.0 | 0.40 | **NOT applied** | +| fair-value `edgeThreshold` | 0.25 | 0.15 | **WRONG DIRECTION** (lowered instead of raised) | +| fair-value `exitEdgeThreshold` | 0.10 | 0.10 | Applied | +| fair-value `maxSecondsToSettlement` | 180 | 300 | **NOT applied** | +| fair-value `maxPositions` | 2 | 2 | Applied | +| gamma-scalper `maxPositions` | 3 | 2 | **NOT applied** | + +The coinbase-fair-value `edgeThreshold` being set to 0.15 (lower than the previous 0.20) makes the strategy **more aggressive**, which is the opposite of the intended fix. This must be corrected to 0.25 immediately. + +### Code Changes Needed (Kalshibot repo) + +1. **Strategy evaluation order by risk** (CRITICAL): In `simulation-engine.js:680`, the strategy loop evaluates in config order. Since only one position per settlement window is allowed (line 773-798), the first strategy to claim a window wins. Change the loop to sort enabled strategies by `maxBetPct` ascending (cheapest first), so gamma-scalper ($4/trade) gets priority over fair-value ($50/trade). This single change would have allowed the +$46 gamma-scalper trade to execute live. +2. **Per-window exposure cap**: Already implemented at `simulation-engine.js:800-815` with `maxExposurePerWindow: 75`. This was added after the initial analysis — verify it's working correctly. +3. **Position size audit**: Verify `calculatePositionSize` in `base-strategy.js` correctly enforces `maxBetPct` — Trade 2's $52 cost exceeded the 3% cap of ~$30. + +--- + +## Impact Estimate + +If these parameter changes had been active on 2026-02-16: +- **Trade 1**: ~$21 loss instead of $42 (maxContracts: 100, maxBetPct: 0.03) — already applied +- **Trade 2**: Filtered out entirely (edgeThreshold 0.25 would reject the 20% signal) +- **Trade 3**: Likely filtered or reduced (tighter maxSecondsToSettlement: 180 blocks 4m-early entries) +- **Gamma-scalper**: With strategy-order-by-risk, would have claimed the 17:00 window first → +$46 live +- **Estimated day**: -$21 + $46 = **+$25 net** instead of -$148 — a $173 improvement diff --git a/docs/research/pumpfun-data-sources.md b/docs/research/pumpfun-data-sources.md new file mode 100644 index 00000000..3c9fe87a --- /dev/null +++ b/docs/research/pumpfun-data-sources.md @@ -0,0 +1,459 @@ +# Pump.fun Launch Tracking Engine: Data Source Evaluation + +**Brain Project**: 467fbe07 — Pump.fun Launch Tracking Engine +**Date**: 2026-02-16 +**Deadline**: 2026-02-20 +**Objective**: Evaluate Helius, Birdeye, and pump.fun APIs for rate limits, auth, and schemas + +--- + +## Executive Summary + +Three primary data source categories were evaluated for the Pump.fun Launch Tracking Engine: + +1. 
**Helius** — Solana-native RPC/infrastructure with Enhanced APIs, webhooks, and gRPC streaming +2. **Birdeye** — DeFi analytics platform with rich token/price/trade REST APIs +3. **pump.fun Direct + Third-Party Indexers** — pump.fun's own frontend APIs plus Bitquery/bloXroute GraphQL indexers + +**Recommendation**: Use Helius (Developer tier, $49/mo) as the primary real-time data source for new token detection and transaction monitoring. Supplement with Birdeye (Starter tier, $99/mo) for enriched token analytics, price history, and holder data. Use pump.fun direct APIs sparingly for metadata not available elsewhere. + +--- + +## 1. Helius + +### Overview +Solana-native RPC and API platform. Best-in-class for raw blockchain data, real-time streaming, and transaction parsing on Solana. + +### Authentication +- API key appended as query parameter: `?api-key=YOUR_KEY` +- Keys managed via [Helius Dashboard](https://dashboard.helius.dev) +- SDK available: `@helius-labs/helius-sdk` (npm) + +### Pricing & Rate Limits + +| Plan | Cost/mo | Credits/mo | RPC RPS | DAS API RPS | Enhanced API RPS | WebSockets | +|------|---------|-----------|---------|-------------|-----------------|------------| +| Free | $0 | 1M | 10 | 2 | 2 | Standard only | +| Developer | $49 | 10M | 50 | 10 | 10 | Standard only | +| Business | $499 | 100M | 200 | 50 | 50 | Enhanced | +| Professional | $999 | 200M | 500 | 100 | 100 | Enhanced + LaserStream | + +### Key APIs for Pump.fun Tracking + +**Enhanced Transactions API** +- `POST https://api-mainnet.helius-rpc.com/v0/transactions?api-key=KEY` — parse up to 100 signatures per request +- `GET https://api-mainnet.helius-rpc.com/v0/addresses/{address}/transactions?api-key=KEY` — address history with pagination +- Parses raw Solana transactions into human-readable format +- Decodes instruction data, token transfers, balance changes +- Response includes: `description`, `type`, `source`, `fee`, `feePayer`, `signature`, `slot`, `timestamp`, `nativeTransfers`, `tokenTransfers`, `accountData`, `events` +- Filter by pump.fun program ID: `6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P` +- Commitment levels: `finalized` (default) or `confirmed` +- Error responses: 400 (bad request), 401 (auth), 429 (rate limit), 500/503/504 (server) + +**Webhooks** +- Push-based event delivery for on-chain events +- Configurable filters: `TOKEN_MINT`, account-specific, program-specific +- Can monitor pump.fun program for new token creates and trades +- Eliminates polling — server receives events as they happen + +**gRPC Streaming (LaserStream)** +- Real-time account and transaction streams +- Filter by program owner for pump.fun bonding curve accounts +- Commitment level: `CONFIRMED` +- Tracks: operation type, user/fee payer, signatures, timestamps, balance changes +- **Note**: Professional plan only for mainnet gRPC + +**DAS (Digital Asset Standard) API** +- Token metadata, ownership, and collection queries +- Useful for enriching token data post-detection + +### Response Schema (Enhanced Transaction) +```json +{ + "description": "string", + "type": "SWAP|TOKEN_MINT|TRANSFER|...", + "source": "PUMP_FUN|RAYDIUM|...", + "fee": 5000, + "feePayer": "pubkey", + "signature": "txid", + "timestamp": 1700000000, + "nativeTransfers": [{ "fromUserAccount": "...", "toUserAccount": "...", "amount": 1000000 }], + "tokenTransfers": [{ "fromTokenAccount": "...", "toTokenAccount": "...", "tokenAmount": 1000, "mint": "..." }], + "accountData": [{ "account": "...", "nativeBalanceChange": -5000, "tokenBalanceChanges": [...] 
}] +} +``` + +### Strengths +- Lowest latency for new token detection (webhooks + gRPC) +- Native pump.fun program filtering +- Enhanced transaction parsing reduces client-side logic +- Staked connections on all paid plans for high tx success +- Well-documented SDK + +### Limitations +- gRPC/LaserStream requires Professional ($999/mo) for mainnet +- Enhanced WebSocket metering (3 credits/0.1MB) for new users +- Raw blockchain data — no pre-built analytics (no OHLCV, no market cap aggregation) + +--- + +## 2. Birdeye + +### Overview +DeFi analytics platform with comprehensive REST APIs for token data, pricing, trades, OHLCV, and wallet analytics. Covers Solana and 30+ other chains. + +### Authentication +- API key via header: `X-API-KEY: YOUR_KEY` +- Keys managed via [Birdeye Dashboard](https://bds.birdeye.so) +- Optional `chain` parameter defaults to Solana + +### Pricing & Rate Limits + +| Plan | Cost/mo | Compute Units | Global RPS | WebSockets | +|------|---------|--------------|-----------|------------| +| Standard (Free) | $0 | 30K | 1 | No | +| Lite | $39 | 1.5M | 15 | No | +| Starter | $99 | 5M | 15 | No | +| Premium | $199 | 15M | 50 | No | +| Premium Plus | $250 | 20M | 50 | 500 conns | +| Business (B-05) | $499 | 50M | 100 | 2000 conns | +| Business | $699 | 100M | 100 | Yes | + +**Per-Endpoint Rate Limits** (within global account limit): + +| Endpoint | Path | Max RPS | +|----------|------|---------| +| Price (single) | `/defi/price` | 300 | +| Price (multi) | `/defi/multi_price` | 300 | +| Price (historical) | `/defi/history_price` | 100 | +| Token Overview | `/defi/token_overview` | 300 | +| Token Security | `/defi/token_security` | 150 | +| Token List v3 | `/defi/v3/token/list` | 100 | +| Trades (token) | `/defi/txs/token` | 100 | +| Trades (pair) | `/defi/txs/pair` | 100 | +| OHLCV | `/defi/ohlcv` | 100 | +| Wallet Portfolio | varies | 30 rpm | + +### Key APIs for Pump.fun Tracking + +**Token Overview** (`/defi/token_overview`) +- Market cap, liquidity, volume, price change, holder count +- Single call returns comprehensive token analytics + +**Token Security** (`/defi/token_security`) +- Rug-pull risk indicators, mint authority, freeze authority +- Critical for filtering high-risk launches + +**Price APIs** (`/defi/price`, `/defi/history_price`) +- Real-time and historical pricing in SOL/USD +- Multi-token batch pricing supported + +**Trade APIs** (`/defi/txs/token`) +- Recent trades with buy/sell side, amounts, timestamps +- Pair-level trade history + +**OHLCV** (`/defi/ohlcv`) +- Candlestick data at configurable intervals +- Useful for charting and trend detection + +**Token List** (`/defi/v3/token/list`) +- Sortable by volume, market cap, price change +- Filter by timeframe for trending tokens + +### Response Schema (Token Overview) +```json +{ + "address": "mint_address", + "name": "Token Name", + "symbol": "TKN", + "decimals": 9, + "price": 0.00123, + "priceChange24hPercent": 150.5, + "volume24h": 500000, + "marketCap": 1200000, + "liquidity": 50000, + "holder": 2500, + "supply": 1000000000, + "logoURI": "https://...", + "extensions": { "website": "...", "twitter": "..." 
} +} +``` + +### Response Schema (Price API — `GET /defi/price?address=MINT`) +```json +{ + "success": true, + "data": { + "value": 0.38622, + "updateUnixTime": 1745058945, + "updateHumanTime": "2025-04-19T10:35:45", + "priceChange24h": 1.93, + "priceInNative": 0.00277, + "liquidity": 10854103.37 + } +} +``` + +### Strengths +- Richest analytics out of the box (market cap, liquidity, security scores) +- Pre-computed OHLCV eliminates aggregation logic +- Token security endpoint critical for filtering scams +- Batch pricing for monitoring multiple tokens +- Clean REST API, easy to integrate + +### WebSocket: New Token Listing Stream + +Available on Premium Plus ($250/mo) and above. Directly relevant for pump.fun launch detection. + +- **URL**: `wss://public-api.birdeye.so/socket/solana?x-api-key=YOUR_KEY` +- **Headers**: `Origin: ws://public-api.birdeye.so`, `Sec-WebSocket-Protocol: echo-protocol` +- **Subscribe**: `{ "type": "SUBSCRIBE_TOKEN_NEW_LISTING", "meme_platform_enabled": true, "sources": ["pump_dot_fun"] }` +- **CU cost**: 0.08 CU per byte + +Response schema: +```json +{ + "type": "TOKEN_NEW_LISTING_DATA", + "data": { + "address": "BkQfwVktcbWmxePJN5weHWJZgReWbiz8gzTdFa2w7Uds", + "decimals": 6, + "name": "Worker Cat", + "symbol": "$MCDCAT", + "liquidity": "12120.155172280874", + "liquidityAddedAt": 1720155863 + } +} +``` + +Supports `min_liquidity`/`max_liquidity` filters and 100+ DEX source filters including Raydium, Orca, Meteora, and pump.fun. + +### Compute Unit Costs (Key Endpoints) + +| Endpoint | CU Cost | Notes | +|----------|---------|-------| +| Token Price | 10 | Cheapest price check | +| Token Metadata | 5 | Very low cost | +| Token List v3 | 100 | Higher cost for list queries | +| Trades (token) | 10 | Affordable for trade monitoring | +| OHLCV | 40 | Moderate | +| Token New Listing (REST) | 80 | One-shot listing check | +| WS: New Listing | 0.08/byte | Streaming cost scales with data | +| WS: Price | 0.003/byte | Very affordable streaming | +| WS: Transactions | 0.0004/byte | Cheapest stream | + +### Limitations +- No push-based event delivery on tiers below Premium Plus (polling only) +- WebSocket access requires Premium Plus ($250/mo) minimum +- New token detection via REST has inherent latency — tokens must be indexed first +- Wallet endpoints severely rate-limited (30 rpm) +- Compute unit costs can escalate with heavy usage +- CU costs subject to change without notice + +--- + +## 3. pump.fun Direct APIs + Third-Party Indexers + +### 3a. pump.fun Frontend API (Direct) + +### Overview +pump.fun exposes several undocumented/semi-official API services. These are reverse-engineered from the frontend and may change without notice. 
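+
+As a minimal illustration of how a tracker might consume this service, the sketch below (Node 18+, global `fetch`) assumes the v3 base URL, headers, ~20 RPM limit, and backoff guidance detailed in the following sections. It is a hedged example only: the API is unofficial, the JWT must be obtained by replicating the browser auth flow (not shown), and response shapes may change without notice.
+
+```javascript
+// Hedged sketch only — unofficial pump.fun frontend API; endpoint, headers, and
+// rate-limit behavior per the Base URLs / Authentication / Observed Rate Limits
+// sections below. JWT acquisition is out of scope here.
+const BASE = 'https://frontend-api-v3.pump.fun';
+
+async function fetchLatestCoins(jwt, attempt = 0) {
+  const res = await fetch(`${BASE}/coins/latest`, {
+    headers: {
+      Authorization: `Bearer ${jwt}`, // assumes a JWT obtained elsewhere
+      Origin: 'https://pump.fun',
+      Accept: 'application/json'
+    }
+  });
+
+  if (res.status === 429 && attempt < 3) {
+    // Exponential backoff with max 3 retries, per the ~20 RPM observed limit
+    await new Promise(resolve => setTimeout(resolve, 2 ** attempt * 1000));
+    return fetchLatestCoins(jwt, attempt + 1);
+  }
+  if (!res.ok) throw new Error(`pump.fun API error: ${res.status}`);
+
+  // x-ratelimit-remaining / x-ratelimit-reset response headers can drive pacing upstream
+  return res.json();
+}
+```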
+ +### Base URLs + +| Service | URL | Purpose | +|---------|-----|---------| +| Frontend API v3 | `https://frontend-api-v3.pump.fun` | Token data, listings | +| Advanced Analytics v2 | `https://advanced-api-v2.pump.fun` | Analytics, rankings | +| Market API | `https://market-api.pump.fun` | Market data | +| Profile API | `https://profile-api.pump.fun` | User profiles | +| Swap API | `https://swap-api.pump.fun` | Token swaps | +| Volatility API v2 | `https://volatility-api-v2.pump.fun` | Volatility metrics | + +### Authentication +- JWT Bearer token: `Authorization: Bearer ` +- Required headers: `Origin: https://pump.fun`, `Accept: application/json` +- Rate limit headers in responses: `x-ratelimit-limit`, `x-ratelimit-remaining`, `x-ratelimit-reset` + +### Key Capabilities +- **483 documented endpoints** across all API versions +- Token creation details, bonding curve status, graduation tracking +- Direct access to pump.fun-specific metadata not available elsewhere +- Creator profiles and reputation data + +### Key V3 Endpoints +- `GET /coins/latest` — latest token launches +- `GET /coins/{mint}` — token details by mint address +- `GET /trades/latest` — latest trades across all tokens +- `GET /trades/token/{mint}` — trades for specific token + +### Observed Rate Limits +- ~20 requests per minute (RPM) across all endpoints +- Rate limit headers in responses: `x-ratelimit-limit`, `x-ratelimit-remaining`, `x-ratelimit-reset` +- HTTP 429 on exceeded limits +- Recommended: exponential backoff with max 3 retries + +### Limitations +- **Undocumented/unofficial** — endpoints can break without warning +- JWT auth requires mimicking browser authentication flow +- Rate limits are restrictive (~20 RPM) and undocumented officially +- No SLA or support +- Legal gray area for automated access +- WebSocket support listed as "coming soon" — not yet available + +--- + +### 3b. Bitquery (GraphQL Indexer) + +### Overview +Third-party GraphQL indexer with dedicated pump.fun query support. Real-time subscriptions for new tokens, trades, and bonding curve events. + +### Authentication +- API key via header or query parameter +- Free tier available via [Bitquery IDE](https://ide.bitquery.io) + +### Pricing + +| Plan | Cost | Points | RPS | Streams | +|------|------|--------|-----|---------| +| Developer (Free) | $0 | 1,000 | 10 req/min | 2 | +| Commercial | Custom | Custom | Custom | Custom | + +### Key APIs for Pump.fun Tracking +- **Token creation subscriptions** — real-time stream of new pump.fun launches +- **Trade subscriptions** — buy/sell with amounts and prices +- **Bonding curve status** — track graduation progress +- **ATH market cap** — all-time high calculations +- **Top traders/holders** — wallet analytics +- **Creator reputation** — all tokens by a creator address + +### GraphQL Query Example (New Token Subscription) +```graphql +subscription { + Solana { + Instructions( + where: { + Instruction: { + Program: { + Address: { is: "6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P" } + } + } + Transaction: { Result: { Success: true } } + } + ) { + Transaction { Signature } + Instruction { + Accounts { Address Token { Mint Owner } } + Program { Method } + } + Block { Time } + } + } +} +``` + +### Limitations +- Free tier extremely limited (1,000 points, 10 req/min, 2 streams) +- Commercial pricing requires sales contact — no self-serve +- Points-based billing is opaque — hard to predict costs +- GraphQL complexity can lead to unexpected point consumption + +--- + +### 3c. 
bloXroute (Streaming) + +### Overview +Specializes in low-latency Solana data streaming with dedicated pump.fun channels. + +### Key Endpoints +- `GetPumpFunNewTokensStream` — real-time new token events +- `GetPumpFunSwapsStream` — real-time swap monitoring +- `GetPumpFunAMMSwapsStream` — AMM swap events post-graduation + +### Limitations +- Pricing not publicly documented +- Primarily targets high-frequency trading use cases +- Overkill for analytics/tracking use case + +--- + +## Comparison Matrix + +| Criteria | Helius | Birdeye | pump.fun Direct | Bitquery | +|----------|--------|---------|-----------------|----------| +| **New token detection latency** | ~1s (webhook/gRPC) | 5-30s (REST) / ~2s (WS) | Unknown | ~2-5s (subscription) | +| **Real-time streaming** | gRPC + WebSocket | WS w/ pump.fun filter ($250+) | No | GraphQL subscriptions | +| **Token analytics** | Raw tx data only | Rich (mcap, vol, security) | Basic metadata | Rich (GraphQL) | +| **OHLCV / Charts** | No | Yes | No | Yes | +| **Security scoring** | No | Yes | No | Partial | +| **Holder data** | Via DAS API | Via token overview | No | Yes (top 10) | +| **Auth complexity** | API key (simple) | API key (simple) | JWT (complex) | API key (simple) | +| **Stability / SLA** | Production-grade | Production-grade | No SLA, may break | Production-grade | +| **Min. useful tier** | Developer ($49) | Starter ($99) | Free (risky) | Commercial ($$?) | +| **Pump.fun specific** | Program filter | General DeFi | Native | Dedicated queries | +| **SDK / DX** | Excellent (npm SDK) | REST (straightforward) | None | GraphQL IDE | + +--- + +## Recommended Architecture + +``` + +------------------+ + | Helius ($49) | + | Webhooks/WS | + +--------+---------+ + | + New token events + Raw transactions + | + v + +------------------+ + | Tracking Engine | + | (PortOS app) | + +--------+---------+ + | + Token enrichment + Analytics queries + | + v + +------------------+ + | Birdeye ($99) | + | REST API | + +------------------+ + - Market cap, volume + - Security scores + - OHLCV data + - Holder counts +``` + +### Phase 1 (MVP): Helius Developer ($49/mo) +- Webhook listening for pump.fun program transactions +- Detect new token creates via `TOKEN_MINT` events +- Parse creator address, token mint, initial supply +- Store in PortOS data layer + +### Phase 2 (Enrichment): Add Birdeye Starter ($99/mo) +- Enrich detected tokens with market data +- Token security scoring for scam filtering +- OHLCV data for trend detection +- Track high-performing tokens over time + +### Phase 3 (Analytics): Evaluate Bitquery or pump.fun direct +- Creator reputation analysis +- Sniper account inventory +- Launch prediction model inputs + +**Total estimated cost**: $148/mo for Phase 1+2 + +--- + +## Next Steps + +1. **Create Helius account** and generate API key +2. **Set up webhook** for pump.fun program (`6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P`) +3. **Create Birdeye account** and generate API key +4. **Build proof-of-concept** endpoint in PortOS that: + - Receives Helius webhook events + - Extracts new token mint + creator + - Enriches via Birdeye token overview + - Persists to `data/pumpfun/tokens.json` +5. 
**Validate latency** — measure time from on-chain creation to detection diff --git a/ecosystem.config.cjs b/ecosystem.config.cjs index 0f47d728..1ab7f209 100644 --- a/ecosystem.config.cjs +++ b/ecosystem.config.cjs @@ -69,7 +69,8 @@ module.exports = { interpreter: 'node', env: { NODE_ENV: 'development', - PORT: PORTS.AUTOFIXER + PORT: PORTS.AUTOFIXER, + PATH: process.env.PATH // Inherit PATH for nvm/node access in child processes }, watch: false, autorestart: true, diff --git a/package-lock.json b/package-lock.json index 4a72bd7f..d6a7316f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "portos", - "version": "0.12.48", + "version": "0.13.20", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "portos", - "version": "0.12.48", + "version": "0.13.20", "license": "MIT", "workspaces": [ "packages/*", @@ -29,7 +29,7 @@ }, "client": { "name": "portos-client", - "version": "0.12.48", + "version": "0.13.20", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", @@ -9365,16 +9365,17 @@ }, "server": { "name": "portos-server", - "version": "0.12.48", + "version": "0.13.20", "dependencies": { "cors": "^2.8.5", "express": "^4.21.2", "node-pty": "^1.2.0-beta.10", "pm2": "^5.4.3", - "portos-ai-toolkit": "github:atomantic/portos-ai-toolkit#v0.4.1", + "portos-ai-toolkit": "^0.5.0", "socket.io": "^4.8.3", "socket.io-client": "^4.8.3", "uuid": "^11.0.3", + "ws": "^8.18.0", "zod": "^3.24.1" }, "devDependencies": { @@ -9454,6 +9455,27 @@ "node": ">=10" } }, + "server/node_modules/@pm2/agent/node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "license": "MIT", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, "server/node_modules/@pm2/io": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/@pm2/io/-/io-6.0.1.tgz", @@ -9841,6 +9863,40 @@ } } }, + "server/node_modules/portos-ai-toolkit": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/portos-ai-toolkit/-/portos-ai-toolkit-0.5.0.tgz", + "integrity": "sha512-2ZJjQYa0CDT0yDapozWHsVpz2nIeqBlXKQeaFOxlqFCHEOryqs96Qj2YyD2XZwH6kN4Il1vrbv58R6SA6H6W1A==", + "license": "MIT", + "dependencies": { + "uuid": "^11.0.3", + "zod": "^3.24.1" + }, + "peerDependencies": { + "express": "^4.21.2 || ^5.2.1", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "socket.io": "^4.8.3", + "socket.io-client": "^4.8.3" + }, + "peerDependenciesMeta": { + "express": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "socket.io": { + "optional": true + }, + "socket.io-client": { + "optional": true + } + } + }, "server/node_modules/proxy-agent": { "version": "6.3.1", "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.3.1.tgz", @@ -9974,6 +10030,27 @@ "node": ">= 0.6" } }, + "server/node_modules/ws": { + "version": "8.19.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.19.0.tgz", + "integrity": "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + 
"peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, "server/node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", diff --git a/package.json b/package.json index d9f14645..35ec0af4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "portos", - "version": "0.12.48", + "version": "0.13.20", "private": true, "description": "Local dev machine App OS portal", "author": "Adam Eivy (@antic|@atomantic)", @@ -17,12 +17,12 @@ "dev:client": "cd client && npm run dev", "build": "cd client && npm run build", "start": "cd server && npm start", - "pm2:start": "PM2_HOME=.pm2 ./node_modules/.bin/pm2 start ecosystem.config.cjs", - "pm2:stop": "PM2_HOME=.pm2 ./node_modules/.bin/pm2 stop ecosystem.config.cjs", - "pm2:restart": "PM2_HOME=.pm2 ./node_modules/.bin/pm2 restart ecosystem.config.cjs", - "pm2:logs": "PM2_HOME=.pm2 ./node_modules/.bin/pm2 logs", - "pm2:status": "PM2_HOME=.pm2 ./node_modules/.bin/pm2 status", - "pm2:kill": "PM2_HOME=.pm2 ./node_modules/.bin/pm2 kill", + "pm2:start": "pm2 start ecosystem.config.cjs", + "pm2:stop": "pm2 stop ecosystem.config.cjs", + "pm2:restart": "pm2 restart ecosystem.config.cjs", + "pm2:logs": "pm2 logs", + "pm2:status": "pm2 status", + "pm2:kill": "pm2 kill", "install:all": "npm install && cd client && npm install && cd ../server && npm install && cd .. && npm run setup", "setup": "node scripts/setup-data.js && node scripts/setup-browser.js", "setup:data": "node scripts/setup-data.js", diff --git a/server/index.js b/server/index.js index b3f2ca3f..909807b7 100644 --- a/server/index.js +++ b/server/index.js @@ -36,6 +36,8 @@ import digitalTwinRoutes from './routes/digital-twin.js'; import socialAccountsRoutes from './routes/socialAccounts.js'; import lmstudioRoutes from './routes/lmstudio.js'; import browserRoutes from './routes/browser.js'; +import moltworldToolsRoutes from './routes/moltworldTools.js'; +import moltworldWsRoutes from './routes/moltworldWs.js'; import { initSocket } from './services/socket.js'; import { initScriptRunner } from './services/scriptRunner.js'; import { errorMiddleware, setupProcessErrorHandlers, asyncHandler } from './lib/errorHandler.js'; @@ -185,6 +187,8 @@ app.use('/api/agents/personalities', agentPersonalitiesRoutes); app.use('/api/agents/accounts', platformAccountsRoutes); app.use('/api/agents/schedules', automationSchedulesRoutes); app.use('/api/agents/activity', agentActivityRoutes); +app.use('/api/agents/tools/moltworld/ws', moltworldWsRoutes); +app.use('/api/agents/tools/moltworld', moltworldToolsRoutes); app.use('/api/agents/tools', agentToolsRoutes); // Existing running agents routes (process management) app.use('/api/agents', agentsRoutes); diff --git a/server/integrations/moltworld/README.md b/server/integrations/moltworld/README.md new file mode 100644 index 00000000..c8a869d6 --- /dev/null +++ b/server/integrations/moltworld/README.md @@ -0,0 +1,79 @@ +# Moltworld Integration + +This directory contains a self-contained integration for Moltworld, a shared voxel world (480x480 grid) where AI agents move, build structures, think out loud, communicate, and earn SIM tokens. 
+ +## Files + +- `api.js` - REST API client with all Moltworld actions +- `rateLimits.js` - Rate limit tracking and enforcement (per-action + global 60 req/min) +- `index.js` - Main export with MoltworldClient class + +## Usage + +### In PortOS + +```javascript +import { MoltworldClient, register } from './integrations/moltworld/index.js'; + +// Register a new agent +const { agentId, apiKey } = await register('MyAgent', { color: '#3b82f6', emoji: '🤖' }); + +// Create a client for an existing agent +const client = new MoltworldClient(apiKey, agentId); + +// Join/move in the world (also heartbeat) +const world = await client.joinWorld({ name: 'MyAgent', x: 5, y: -3 }); + +// Think out loud +await client.think('Exploring the voxel world...'); + +// Build a structure +await client.build({ x: 5, y: 3, z: 0, type: 'stone', action: 'place' }); + +// Check balance +const { balance } = await client.getBalance(); +``` + +### Key Differences from Moltbook + +| Feature | Moltbook | Moltworld | +|---------|----------|-----------| +| Auth | Bearer token header | agentId in body/query | +| Actions | Posts, comments, voting | Movement, building, thinking | +| Lifecycle | Persistent | Expires after 10 min inactivity | +| Economy | Karma | SIM tokens (0.1/hour online) | +| Verification | Math challenges | None | + +### Rate Limits + +| Action | Cooldown | Daily Limit | +|--------|----------|-------------| +| Join/Move | 5 seconds | 17,280/day | +| Build | 1 second | 500/day | +| Think | 5 seconds | 1,000/day | +| **Global** | — | **60 req/min** | + +### Block Types + +- `wood`, `stone`, `dirt`, `grass`, `leaves` + +### Coordinate System + +- World: -240 to 240 (X and Y) +- Building: -500 to 500 (X and Y), 0-100 (Z height) + +## Integration with PortOS Scheduler + +```javascript +import { MoltworldClient } from './integrations/moltworld/index.js'; + +// Heartbeat — call every 5-10s to keep agent visible +const client = new MoltworldClient(apiKey, agentId); +await client.joinWorld({ name: 'MyAgent', x: 0, y: 0 }); + +// Explore — move to coordinates and think +await client.joinWorld({ name: 'MyAgent', x: 10, y: 20, thinking: 'Nice view from here' }); + +// Build — place blocks +await client.build({ x: 10, y: 20, z: 0, type: 'stone' }); +``` diff --git a/server/integrations/moltworld/api.js b/server/integrations/moltworld/api.js new file mode 100644 index 00000000..602aeec5 --- /dev/null +++ b/server/integrations/moltworld/api.js @@ -0,0 +1,242 @@ +/** + * Moltworld API Client + * + * REST API client for Moltworld - a shared voxel world where AI agents move, + * build structures, think out loud, communicate, and earn SIM tokens. 
+ * + * API Base: https://moltworld.io + * Auth: agentId in request body/query (not Bearer token) + */ + +import { checkRateLimit, recordAction, syncFromExternal } from './rateLimits.js'; + +const API_BASE = 'https://moltworld.io'; + +/** + * Infer the rate-limited action type from a Moltworld API endpoint + */ +function inferActionFromEndpoint(endpoint, method) { + if (method !== 'POST') return null; + if (endpoint === '/api/world/join') return 'join'; + if (endpoint === '/api/world/build') return 'build'; + if (endpoint === '/api/world/think') return 'think'; + return null; +} + +/** + * Make an API request to Moltworld + */ +async function request(endpoint, options = {}) { + const url = `${API_BASE}${endpoint}`; + const config = { + headers: { + 'Content-Type': 'application/json', + ...options.headers + }, + ...options + }; + + console.log(`🌍 Moltworld API: ${options.method || 'GET'} ${endpoint}`); + + const fetchResult = await fetch(url, config).then(r => ({ ok: true, response: r }), e => ({ ok: false, error: e })); + + if (!fetchResult.ok) { + console.error(`❌ Moltworld API unreachable: ${fetchResult.error.message}`); + const err = new Error('Moltworld is currently unavailable'); + err.status = 503; + err.code = 'PLATFORM_UNAVAILABLE'; + throw err; + } + + const response = fetchResult.response; + + if (!response.ok) { + const error = await response.json().catch(() => ({})); + const message = error.error || error.message || `HTTP ${response.status}`; + if (response.status !== 404) { + console.error(`❌ Moltworld API error: ${response.status} ${message}`); + } else { + console.log(`🌍 Moltworld API: 404 ${endpoint}`); + } + // Sync local rate limit state when platform enforces a cooldown + if (response.status === 429) { + const body = options.body ? JSON.parse(options.body) : {}; + const agentId = body.agentId; + const action = inferActionFromEndpoint(endpoint, options.method); + if (agentId && action) { + syncFromExternal(agentId, action); + console.log(`⏱️ Synced ${action} rate limit from 429 response`); + } + } + + const err = new Error(message); + err.status = response.status >= 500 ? 503 : response.status; + err.code = response.status >= 500 ? 
'PLATFORM_UNAVAILABLE' : undefined; + throw err; + } + + if (response.status === 204) { + return null; + } + + return response.json(); +} + +// ============================================================================= +// REGISTRATION +// ============================================================================= + +/** + * Register a new agent on Moltworld + * @param {string} name - Display name for the agent + * @param {Object} appearance - Appearance config { color, emoji, style } + * @returns {{ agentId: string, apiKey: string, position: Object }} Registration result + */ +export async function register(name, appearance = {}) { + const result = await request('/api/agents/register', { + method: 'POST', + body: JSON.stringify({ + name, + worldId: 'alpha', + appearance: { + color: appearance.color || '#3b82f6', + emoji: appearance.emoji || '🤖', + style: appearance.style || 'robot' + } + }) + }); + + // Normalize: API may return { agent: { id } } or { agentId } + const agentId = result?.agentId || result?.agent?.id || result?.id; + const apiKey = result?.apiKey || result?.api_key || agentId; + console.log(`🆕 Moltworld: Registered agent "${name}" (agentId=${agentId})`); + return { ...result, agentId, apiKey }; +} + +// ============================================================================= +// PROFILE / ACCOUNT +// ============================================================================= + +/** + * Get the agent's profile + * @param {string} agentId - The agent's ID + */ +export async function getProfile(agentId) { + return request(`/api/agents/profile?agentId=${encodeURIComponent(agentId)}`); +} + +/** + * Update the agent's profile + * @param {string} agentId - The agent's ID + * @param {Object} updates - Profile updates (name, appearance) + */ +export async function updateProfile(agentId, updates) { + return request('/api/agents/profile', { + method: 'PATCH', + body: JSON.stringify({ agentId, ...updates }) + }); +} + +/** + * Get the agent's SIM token balance + * @param {string} agentId - The agent's ID + */ +export async function getBalance(agentId) { + return request(`/api/agents/balance?agentId=${encodeURIComponent(agentId)}`); +} + +// ============================================================================= +// WORLD ACTIONS +// ============================================================================= + +/** + * Join/move in the world — also serves as heartbeat to stay visible + * Must be called every 5-10 seconds to keep agent alive. 
+ * @param {string} agentId - The agent's ID + * @param {Object} options - Movement and communication options + * @param {string} options.name - Agent display name + * @param {number} options.x - X coordinate (-240 to 240) + * @param {number} options.y - Y coordinate (-240 to 240) + * @param {string} [options.thinking] - Thought bubble text + * @param {string} [options.say] - Broadcast message to nearby agents + * @param {string} [options.sayTo] - Direct message to specific agent ID + */ +export async function joinWorld(agentId, options = {}) { + const rateCheck = checkRateLimit(agentId, 'join'); + if (!rateCheck.allowed) { + const err = new Error(`Rate limited: ${rateCheck.reason}`); + err.status = 429; + err.waitMs = rateCheck.waitMs; + throw err; + } + + const body = { agentId, ...options }; + const result = await request('/api/world/join', { + method: 'POST', + body: JSON.stringify(body) + }); + + recordAction(agentId, 'join'); + console.log(`🌍 Moltworld: Agent joined/moved (x=${options.x}, y=${options.y})`); + return result; +} + +/** + * Think out loud — visible to nearby agents + * @param {string} agentId - The agent's ID + * @param {string} thought - The thought text + */ +export async function think(agentId, thought) { + const rateCheck = checkRateLimit(agentId, 'think'); + if (!rateCheck.allowed) { + const err = new Error(`Rate limited: ${rateCheck.reason}`); + err.status = 429; + err.waitMs = rateCheck.waitMs; + throw err; + } + + const result = await request('/api/world/think', { + method: 'POST', + body: JSON.stringify({ agentId, thought }) + }); + + recordAction(agentId, 'think'); + console.log(`💭 Moltworld: Agent thought: "${thought.substring(0, 50)}..."`); + return result; +} + +/** + * Build or remove a block in the world + * @param {string} agentId - The agent's ID + * @param {Object} options - Build options + * @param {number} options.x - X coordinate (-500 to 500) + * @param {number} options.y - Y coordinate (-500 to 500) + * @param {number} options.z - Z height (0 to 100) + * @param {string} options.type - Block type: wood, stone, dirt, grass, leaves + * @param {string} [options.action='place'] - 'place' or 'remove' + */ +export async function build(agentId, options = {}) { + const rateCheck = checkRateLimit(agentId, 'build'); + if (!rateCheck.allowed) { + const err = new Error(`Rate limited: ${rateCheck.reason}`); + err.status = 429; + err.waitMs = rateCheck.waitMs; + throw err; + } + + const result = await request('/api/world/build', { + method: 'POST', + body: JSON.stringify({ + agentId, + x: options.x, + y: options.y, + z: options.z, + type: options.type || 'stone', + action: options.action || 'place' + }) + }); + + recordAction(agentId, 'build'); + console.log(`🧱 Moltworld: ${options.action || 'place'} ${options.type || 'stone'} at (${options.x},${options.y},${options.z})`); + return result; +} diff --git a/server/integrations/moltworld/index.js b/server/integrations/moltworld/index.js new file mode 100644 index 00000000..b218598d --- /dev/null +++ b/server/integrations/moltworld/index.js @@ -0,0 +1,56 @@ +/** + * Moltworld Integration + * + * This module provides integration with Moltworld — a shared voxel world + * where AI agents move, build structures, think out loud, communicate, + * and earn SIM tokens. 
+ * + * @module integrations/moltworld + */ + +// Re-export all API functions +export * from './api.js'; + +// Re-export rate limit utilities +export { + MOLTWORLD_RATE_LIMITS, + checkRateLimit, + recordAction, + getRateLimitStatus, + clearRateLimitState +} from './rateLimits.js'; + +// Export a convenience client class for stateful usage +import * as api from './api.js'; +import { getRateLimitStatus } from './rateLimits.js'; + +/** + * Moltworld client for a specific agent account + */ +export class MoltworldClient { + constructor(apiKey, agentId) { + this.apiKey = apiKey; + this.agentId = agentId; + } + + // World actions + joinWorld(options) { return api.joinWorld(this.agentId, options); } + think(thought) { return api.think(this.agentId, thought); } + build(options) { return api.build(this.agentId, options); } + + // Profile + getProfile() { return api.getProfile(this.agentId); } + updateProfile(updates) { return api.updateProfile(this.agentId, updates); } + + // Balance + getBalance() { return api.getBalance(this.agentId); } + + // Rate limits + getRateLimitStatus() { return getRateLimitStatus(this.agentId); } +} + +/** + * Register a new agent on Moltworld + * This is a static method since it doesn't require an existing agent ID + */ +MoltworldClient.register = api.register; diff --git a/server/integrations/moltworld/rateLimits.js b/server/integrations/moltworld/rateLimits.js new file mode 100644 index 00000000..c697c013 --- /dev/null +++ b/server/integrations/moltworld/rateLimits.js @@ -0,0 +1,196 @@ +/** + * Moltworld Rate Limits + * + * Rate limit configuration for Moltworld API actions. + * These limits are enforced by the platform — exceeding them will result in errors. + * Global limit: 60 requests/minute per agent. + */ + +export const MOLTWORLD_RATE_LIMITS = { + join: { + cooldownMs: 5 * 1000, // 5 seconds between joins (heartbeat) + maxPerDay: 17280 // Generous — agents call every 5-10s + }, + build: { + cooldownMs: 1 * 1000, // 1 second between builds + maxPerDay: 500 // Maximum builds per day + }, + think: { + cooldownMs: 5 * 1000, // 5 seconds between thoughts + maxPerDay: 1000 // Maximum thoughts per day + } +}; + +// Global rate limit: 60 requests per minute per agent +const GLOBAL_WINDOW_MS = 60 * 1000; +const GLOBAL_MAX_REQUESTS = 60; + +// In-memory rate limit tracking per agent ID +const rateLimitState = new Map(); +const globalRateState = new Map(); + +/** + * Get rate limit state for an agent ID + */ +function getState(agentId) { + if (!rateLimitState.has(agentId)) { + rateLimitState.set(agentId, { + join: { lastAction: 0, todayCount: 0, dayStart: Date.now() }, + build: { lastAction: 0, todayCount: 0, dayStart: Date.now() }, + think: { lastAction: 0, todayCount: 0, dayStart: Date.now() } + }); + } + + const state = rateLimitState.get(agentId); + const now = Date.now(); + const oneDayMs = 24 * 60 * 60 * 1000; + + // Reset daily counters if day has changed + for (const action of Object.keys(state)) { + if (now - state[action].dayStart > oneDayMs) { + state[action].todayCount = 0; + state[action].dayStart = now; + } + } + + return state; +} + +/** + * Get global rate limit state for an agent + */ +function getGlobalState(agentId) { + if (!globalRateState.has(agentId)) { + globalRateState.set(agentId, []); + } + const timestamps = globalRateState.get(agentId); + const now = Date.now(); + // Prune timestamps outside the window + const pruned = timestamps.filter(t => now - t < GLOBAL_WINDOW_MS); + globalRateState.set(agentId, pruned); + return pruned; +} + +/** + * 
Check if an action is rate limited + * @param {string} agentId - The agent ID to check + * @param {string} action - The action type (join, build, think) + * @returns {{ allowed: boolean, waitMs?: number, reason?: string }} + */ +export function checkRateLimit(agentId, action) { + // Check global rate limit first + const globalTimestamps = getGlobalState(agentId); + if (globalTimestamps.length >= GLOBAL_MAX_REQUESTS) { + const oldestInWindow = globalTimestamps[0]; + return { + allowed: false, + reason: `Global rate limit (${GLOBAL_MAX_REQUESTS}/min)`, + waitMs: GLOBAL_WINDOW_MS - (Date.now() - oldestInWindow) + }; + } + + const limits = MOLTWORLD_RATE_LIMITS[action]; + if (!limits) { + return { allowed: true }; + } + + const state = getState(agentId); + const actionState = state[action]; + const now = Date.now(); + + // Check daily limit + if (actionState.todayCount >= limits.maxPerDay) { + return { + allowed: false, + reason: `Daily limit reached (${limits.maxPerDay}/${action}s per day)`, + waitMs: actionState.dayStart + 24 * 60 * 60 * 1000 - now + }; + } + + // Check cooldown + const timeSinceLast = now - actionState.lastAction; + if (timeSinceLast < limits.cooldownMs) { + return { + allowed: false, + reason: `Cooldown active (${Math.ceil(limits.cooldownMs / 1000)}s between ${action}s)`, + waitMs: limits.cooldownMs - timeSinceLast + }; + } + + return { allowed: true }; +} + +/** + * Record an action for rate limiting + * @param {string} agentId - The agent ID + * @param {string} action - The action type + */ +export function recordAction(agentId, action) { + // Record per-action state + const state = getState(agentId); + if (state[action]) { + state[action].lastAction = Date.now(); + state[action].todayCount++; + } + + // Record in global sliding window + const globalTimestamps = getGlobalState(agentId); + globalTimestamps.push(Date.now()); +} + +/** + * Get current rate limit status for all actions + * @param {string} agentId - The agent ID + * @returns {Object} Status for each action type + */ +export function getRateLimitStatus(agentId) { + const state = getState(agentId); + const now = Date.now(); + const status = {}; + + for (const [action, limits] of Object.entries(MOLTWORLD_RATE_LIMITS)) { + const actionState = state[action]; + const timeSinceLast = now - actionState.lastAction; + const cooldownRemaining = Math.max(0, limits.cooldownMs - timeSinceLast); + + status[action] = { + todayCount: actionState.todayCount, + maxPerDay: limits.maxPerDay, + remaining: limits.maxPerDay - actionState.todayCount, + cooldownMs: limits.cooldownMs, + cooldownRemainingMs: cooldownRemaining, + canAct: actionState.todayCount < limits.maxPerDay && cooldownRemaining === 0 + }; + } + + // Add global rate info + const globalTimestamps = getGlobalState(agentId); + status._global = { + requestsInWindow: globalTimestamps.length, + maxPerMinute: GLOBAL_MAX_REQUESTS, + remaining: GLOBAL_MAX_REQUESTS - globalTimestamps.length + }; + + return status; +} + +/** + * Sync local rate limit state from a platform 429 response + * @param {string} agentId - The agent ID + * @param {string} action - The action type + */ +export function syncFromExternal(agentId, action) { + const state = getState(agentId); + if (state[action]) { + state[action].lastAction = Date.now(); + } +} + +/** + * Clear rate limit state for an agent ID (e.g., on account deletion) + * @param {string} agentId - The agent ID + */ +export function clearRateLimitState(agentId) { + rateLimitState.delete(agentId); + globalRateState.delete(agentId); +} diff 
--git a/server/lib/brainValidation.js b/server/lib/brainValidation.js index 8af8c79d..6def5e06 100644 --- a/server/lib/brainValidation.js +++ b/server/lib/brainValidation.js @@ -6,6 +6,9 @@ export const destinationEnum = z.enum(['people', 'projects', 'ideas', 'admin', ' // Project status enum export const projectStatusEnum = z.enum(['active', 'waiting', 'blocked', 'someday', 'done']); +// Idea status enum +export const ideaStatusEnum = z.enum(['active', 'done']); + // Admin status enum export const adminStatusEnum = z.enum(['open', 'waiting', 'done']); @@ -92,6 +95,7 @@ export const projectRecordSchema = z.object({ export const ideaRecordSchema = z.object({ id: z.string().uuid(), title: z.string().min(1).max(200), + status: ideaStatusEnum.default('active'), oneLiner: z.string().min(1).max(500), notes: z.string().max(5000).optional(), tags: z.array(z.string().max(50)).optional().default([]), @@ -197,6 +201,7 @@ export const projectInputSchema = z.object({ // Create/Update Idea input schema export const ideaInputSchema = z.object({ title: z.string().min(1).max(200), + status: ideaStatusEnum.optional().default('active'), oneLiner: z.string().min(1).max(500), notes: z.string().max(5000).optional(), tags: z.array(z.string().max(50)).optional() @@ -244,6 +249,7 @@ export const extractedProjectSchema = z.object({ // Extracted Idea fields export const extractedIdeaSchema = z.object({ title: z.string().min(1).max(200), + status: ideaStatusEnum.optional().default('active'), oneLiner: z.string().min(1).max(500), notes: z.string().max(5000).optional().default(''), tags: z.array(z.string().max(50)).optional().default([]) diff --git a/server/lib/curatedGenomeMarkers.js b/server/lib/curatedGenomeMarkers.js index 208abf7a..56eef3f5 100644 --- a/server/lib/curatedGenomeMarkers.js +++ b/server/lib/curatedGenomeMarkers.js @@ -26,7 +26,13 @@ export const MARKER_CATEGORIES = { mental_health: { label: 'Mental Health', icon: 'Brain', color: 'violet' }, bone_health: { label: 'Bone Health', icon: 'Bone', color: 'stone' }, pharmacogenomics: { label: 'Pharmacogenomics', icon: 'Pill', color: 'fuchsia' }, - cancer_risk: { label: 'Cancer Predisposition', icon: 'ShieldCheck', color: 'red' }, + cancer_breast: { label: 'Breast & Ovarian Cancer', icon: 'Ribbon', color: 'pink' }, + cancer_prostate: { label: 'Prostate Cancer', icon: 'ShieldCheck', color: 'blue' }, + cancer_colorectal: { label: 'Colorectal Cancer', icon: 'ShieldCheck', color: 'amber' }, + cancer_lung: { label: 'Lung Cancer', icon: 'Wind', color: 'slate' }, + cancer_melanoma: { label: 'Melanoma Risk', icon: 'Sun', color: 'stone' }, + cancer_bladder: { label: 'Bladder Cancer', icon: 'ShieldCheck', color: 'zinc' }, + cancer_digestive: { label: 'Digestive Cancer', icon: 'ShieldCheck', color: 'lime' }, hair: { label: 'Hair Loss', icon: 'Scissors', color: 'zinc' }, hearing: { label: 'Hearing', icon: 'Ear', color: 'slate' }, pain: { label: 'Pain Sensitivity', icon: 'Zap', color: 'orange' } @@ -1234,7 +1240,7 @@ export const CURATED_MARKERS = [ rsid: 'rs61764370', gene: 'KRAS (3\'UTR)', name: 'KRAS let-7 MicroRNA Binding Variant', - category: 'cancer_risk', + category: 'tumor_suppression', description: 'This variant in the 3\'UTR of KRAS disrupts a let-7 microRNA binding site, increasing KRAS expression. Associated with increased risk for non-small cell lung cancer, ovarian cancer, and triple-negative breast cancer. Particularly relevant for never-smokers with lung cancer.', implications: { beneficial: 'T/T — normal let-7 regulation of KRAS. 
Standard cancer risk baseline.', @@ -1434,6 +1440,664 @@ export const CURATED_MARKERS = [ { genotypes: ['G/A', 'A/G'], status: 'beneficial' }, { genotypes: ['G/G'], status: 'typical' } ] + }, + + // === BREAST & OVARIAN CANCER === + { + rsid: 'rs2981582', + gene: 'FGFR2', + name: 'FGFR2 Breast Cancer Susceptibility', + category: 'cancer_breast', + description: 'FGFR2 (Fibroblast Growth Factor Receptor 2) is one of the strongest and most replicated breast cancer GWAS loci. The T allele increases ER-positive breast cancer risk ~1.26x per copy. Stronger association with estrogen receptor-positive disease.', + implications: { + beneficial: 'C/C — lower genetic risk for breast cancer at this locus.', + typical: 'C/T — one risk allele. Mildly elevated breast cancer risk (~1.26x).', + concern: 'T/T — two risk alleles. ~1.63x increased breast cancer risk, particularly ER-positive subtypes.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs3803662', + gene: 'TOX3', + name: 'TOX3 Breast Cancer Susceptibility', + category: 'cancer_breast', + description: 'TOX3 (TNRC9) at 16q12 is a well-replicated breast cancer susceptibility locus. The T allele correlates with lower TOX3 expression, increasing breast cancer risk ~1.20x per copy. Replicated across multiple populations.', + implications: { + beneficial: 'C/C — lower genetic risk at this locus.', + typical: 'C/T — one risk allele. Modestly increased breast cancer risk (~1.20x).', + concern: 'T/T — two risk alleles. ~1.40x increased breast cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs889312', + gene: 'MAP3K1', + name: 'MAP3K1 Breast Cancer Susceptibility', + category: 'cancer_breast', + description: 'MAP3K1 at 5q11 encodes a MAPK kinase involved in cell proliferation signaling. The C allele is associated with modestly increased breast cancer risk (~1.13x per copy).', + implications: { + beneficial: 'A/A — lower genetic risk at this locus.', + typical: 'A/C — one risk allele. Mildly increased breast cancer risk.', + concern: 'C/C — two risk alleles. ~1.22x increased breast cancer risk.' + }, + rules: [ + { genotypes: ['A/A'], status: 'beneficial' }, + { genotypes: ['A/C', 'C/A'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs13387042', + gene: '2q35 intergenic', + name: 'Chromosome 2q35 Breast Cancer Locus', + category: 'cancer_breast', + description: 'Intergenic variant at 2q35 with strong replication across European and Hispanic populations. The A allele increases breast cancer risk ~1.15x per copy.', + implications: { + beneficial: 'G/G — lower genetic risk at this locus.', + typical: 'A/G — one risk allele. Mildly increased breast cancer risk.', + concern: 'A/A — two risk alleles. ~1.40x increased breast cancer risk.' + }, + rules: [ + { genotypes: ['G/G'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + { + rsid: 'rs3817198', + gene: 'LSP1', + name: 'LSP1 Breast Cancer Susceptibility', + category: 'cancer_breast', + description: 'LSP1 (Lymphocyte-Specific Protein 1) at 11p15. The C allele modestly increases breast cancer risk.', + implications: { + beneficial: 'T/T — lower genetic risk at this locus.', + typical: 'C/T — one risk allele. 
Small increase in breast cancer risk.', + concern: 'C/C — two risk alleles. ~1.15x increased breast cancer risk.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs10941679', + gene: '5p12/FGF10', + name: '5p12 ER-Positive Breast Cancer Locus', + category: 'cancer_breast', + description: 'Variant near FGF10/MRPS30 at 5p12. The G allele is associated with increased ER-positive breast cancer risk. Stronger effect in estrogen receptor-positive disease.', + implications: { + beneficial: 'A/A — lower genetic risk at this locus.', + typical: 'A/G — one risk allele. Modestly increased ER+ breast cancer risk.', + concern: 'G/G — two risk alleles. ~1.30x increased ER-positive breast cancer risk.' + }, + rules: [ + { genotypes: ['A/A'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['G/G'], status: 'concern' } + ] + }, + { + rsid: 'rs4973768', + gene: 'SLC4A7', + name: 'SLC4A7 Breast Cancer Susceptibility', + category: 'cancer_breast', + description: 'SLC4A7 (Solute Carrier Family 4 Member 7) at 3p24. The T allele increases breast cancer risk ~1.11x per copy. Well-replicated in GWAS meta-analyses.', + implications: { + beneficial: 'C/C — lower genetic risk at this locus.', + typical: 'C/T — one risk allele. Small increase in breast cancer risk.', + concern: 'T/T — two risk alleles. Modestly increased breast cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs1045485', + gene: 'CASP8', + name: 'CASP8 Breast Cancer Protection (D302H)', + category: 'cancer_breast', + description: 'CASP8 (Caspase-8) D302H variant at 2q33. Unusual marker where the minor C allele is protective, reducing breast cancer risk in a dose-dependent manner (OR ~0.89 per C allele).', + implications: { + beneficial: 'C/C — protective genotype. Reduced breast cancer risk (~0.74x).', + typical: 'C/G — one protective allele. Modestly reduced breast cancer risk.', + concern: 'G/G — common genotype. Standard baseline breast cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/G', 'G/C'], status: 'beneficial' }, + { genotypes: ['G/G'], status: 'typical' } + ] + }, + { + rsid: 'i4000377', + gene: 'BRCA1', + name: 'BRCA1 185delAG (Ashkenazi Founder)', + category: 'cancer_breast', + description: 'Ashkenazi Jewish founder mutation in BRCA1 (185delAG). FDA-approved 23andMe test. Carriers have ~72% lifetime breast cancer risk and ~44% ovarian cancer risk. High-penetrance pathogenic variant.', + implications: { + beneficial: 'I/I — no deletion detected. Standard population-level risk.', + major_concern: 'D/I — deletion carrier. Very high lifetime risk for breast and ovarian cancer. Genetic counseling strongly recommended.' + }, + rules: [ + { genotypes: ['I/I'], status: 'beneficial' }, + { genotypes: ['D/I', 'I/D', 'D/D'], status: 'major_concern' } + ] + }, + { + rsid: 'i4000378', + gene: 'BRCA1', + name: 'BRCA1 5382insC (Ashkenazi Founder)', + category: 'cancer_breast', + description: 'Ashkenazi Jewish founder mutation in BRCA1 (5382insC). FDA-approved 23andMe test. Carriers have substantially elevated lifetime breast and ovarian cancer risk. High-penetrance pathogenic variant.', + implications: { + beneficial: 'I/I — no insertion detected. 
Standard population-level risk.', + major_concern: 'D/I — insertion carrier. Very high lifetime risk for breast and ovarian cancer. Genetic counseling strongly recommended.' + }, + rules: [ + { genotypes: ['I/I'], status: 'beneficial' }, + { genotypes: ['D/I', 'I/D', 'D/D'], status: 'major_concern' } + ] + }, + { + rsid: 'i4000379', + gene: 'BRCA2', + name: 'BRCA2 6174delT (Ashkenazi Founder)', + category: 'cancer_breast', + description: 'Ashkenazi Jewish founder mutation in BRCA2 (6174delT). FDA-approved 23andMe test. Carriers have elevated lifetime risk for breast, ovarian, prostate, and pancreatic cancer. High-penetrance pathogenic variant.', + implications: { + beneficial: 'I/I — no deletion detected. Standard population-level risk.', + major_concern: 'D/I — deletion carrier. Elevated lifetime risk for breast, ovarian, prostate, and pancreatic cancer. Genetic counseling strongly recommended.' + }, + rules: [ + { genotypes: ['I/I'], status: 'beneficial' }, + { genotypes: ['D/I', 'I/D', 'D/D'], status: 'major_concern' } + ] + }, + { + rsid: 'rs3814113', + gene: 'BNC2', + name: 'BNC2 Ovarian Cancer Susceptibility', + category: 'cancer_breast', + description: 'BNC2 at 9p22 is the strongest GWAS association for high-grade serous ovarian carcinoma. The C allele is protective, reducing ovarian cancer risk.', + implications: { + beneficial: 'C/C — protective genotype. Reduced ovarian cancer risk.', + typical: 'C/T — one protective allele. Modestly reduced ovarian cancer risk.', + concern: 'T/T — standard risk genotype for high-grade serous ovarian cancer.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + + // === PROSTATE CANCER === + { + rsid: 'rs6983267', + gene: '8q24/MYC region', + name: '8q24 Multi-Cancer Risk Locus', + category: 'cancer_prostate', + description: 'One of the most replicated multi-cancer variants at 8q24 near MYC. The G allele affects Wnt/TCF4 signaling and increases prostate cancer risk (~1.26x) and colorectal cancer risk (~1.50x). Population attributable risk ~21%.', + implications: { + beneficial: 'T/T — lower genetic risk at this multi-cancer locus.', + typical: 'G/T — one risk allele. Modestly increased prostate and colorectal cancer risk.', + concern: 'G/G — two risk alleles. ~1.26x prostate, ~1.50x colorectal cancer risk.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['G/T', 'T/G'], status: 'typical' }, + { genotypes: ['G/G'], status: 'concern' } + ] + }, + { + rsid: 'rs1447295', + gene: '8q24 (region 1)', + name: '8q24 Region 1 Prostate Cancer Locus', + category: 'cancer_prostate', + description: 'Independent 8q24 signal for prostate cancer risk. The A allele increases risk ~1.22x per copy. Population attributable risk ~9% for prostate cancer.', + implications: { + beneficial: 'C/C — lower genetic risk at this locus.', + typical: 'A/C — one risk allele. Modestly increased prostate cancer risk.', + concern: 'A/A — two risk alleles. ~1.44x increased prostate cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['A/C', 'C/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + { + rsid: 'rs10993994', + gene: 'MSMB', + name: 'MSMB Prostate Cancer Susceptibility', + category: 'cancer_prostate', + description: 'MSMB (Microseminoprotein-Beta) at 10q11. 
The T risk allele downregulates MSMB expression by ~70%, increasing prostate cancer risk ~1.24x per copy.', + implications: { + beneficial: 'C/C — normal MSMB expression. Lower prostate cancer risk.', + typical: 'C/T — one risk allele. Reduced MSMB expression, modestly increased prostate cancer risk.', + concern: 'T/T — significantly reduced MSMB expression (~70% lower). ~1.50x increased prostate cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs4430796', + gene: 'HNF1B', + name: 'HNF1B Prostate & Endometrial Cancer', + category: 'cancer_prostate', + description: 'HNF1B (TCF2) at 17q12 is a pleiotropic locus also associated with type 2 diabetes. The A allele increases prostate cancer risk ~1.25x.', + implications: { + beneficial: 'G/G — lower genetic risk at this locus.', + typical: 'A/G — one risk allele. Modestly increased prostate cancer risk.', + concern: 'A/A — two risk alleles. ~1.25x increased prostate cancer risk.' + }, + rules: [ + { genotypes: ['G/G'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + { + rsid: 'rs1859962', + gene: '17q24.3', + name: 'Chromosome 17q24 Prostate Cancer Locus', + category: 'cancer_prostate', + description: 'Variant at 17q24.3 associated with prostate cancer risk. The G allele increases risk ~1.37x for homozygous carriers.', + implications: { + beneficial: 'T/T — lower genetic risk at this locus.', + typical: 'G/T — one risk allele. Modestly increased prostate cancer risk.', + concern: 'G/G — two risk alleles. ~1.37x increased prostate cancer risk.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['G/T', 'T/G'], status: 'typical' }, + { genotypes: ['G/G'], status: 'concern' } + ] + }, + { + rsid: 'rs16901979', + gene: '8q24 (region 2)', + name: '8q24 Region 2 Prostate Cancer Locus', + category: 'cancer_prostate', + description: 'Independent 8q24 signal from rs6983267 and rs1447295. The A allele increases prostate cancer risk ~1.47x.', + implications: { + beneficial: 'C/C — lower genetic risk at this locus.', + typical: 'A/C — one risk allele. Increased prostate cancer risk.', + concern: 'A/A — two risk alleles. ~1.47x increased prostate cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['A/C', 'C/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + + // === COLORECTAL CANCER === + { + rsid: 'rs4939827', + gene: 'SMAD7', + name: 'SMAD7 Colorectal Cancer Protection', + category: 'cancer_colorectal', + description: 'SMAD7 at 18q21 modulates TGF-β signaling. The T allele is protective, reducing colorectal cancer risk ~0.77x per copy. Stronger association with rectal than colon cancer.', + implications: { + beneficial: 'T/T — protective genotype. Reduced colorectal cancer risk.', + typical: 'C/T — one protective allele. Modestly reduced colorectal cancer risk.', + concern: 'C/C — standard risk genotype for colorectal cancer.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs16892766', + gene: 'EIF3H', + name: 'EIF3H Colorectal Cancer Susceptibility', + category: 'cancer_colorectal', + description: 'EIF3H (Eukaryotic Translation Initiation Factor 3 Subunit H) at 8q23. 
The C risk allele increases colorectal cancer risk ~1.25x per copy.', + implications: { + beneficial: 'A/A — lower genetic risk at this locus.', + typical: 'A/C — one risk allele. Modestly increased colorectal cancer risk.', + concern: 'C/C — two risk alleles. ~1.56x increased colorectal cancer risk.' + }, + rules: [ + { genotypes: ['A/A'], status: 'beneficial' }, + { genotypes: ['A/C', 'C/A'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs3802842', + gene: '11q23/LOC120376', + name: 'Chromosome 11q23 Colorectal Cancer Locus', + category: 'cancer_colorectal', + description: 'Variant at 11q23 associated with colorectal cancer. The C allele increases risk, with stronger association for rectal cancer.', + implications: { + beneficial: 'A/A — lower genetic risk at this locus.', + typical: 'A/C — one risk allele. Modestly increased colorectal cancer risk.', + concern: 'C/C — two risk alleles. ~1.35x increased colorectal cancer risk.' + }, + rules: [ + { genotypes: ['A/A'], status: 'beneficial' }, + { genotypes: ['A/C', 'C/A'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs4779584', + gene: 'GREM1', + name: 'GREM1 Colorectal Cancer Susceptibility', + category: 'cancer_colorectal', + description: 'GREM1 (Gremlin 1) at 15q13 encodes a BMP antagonist involved in intestinal stem cell regulation. The T allele increases colorectal cancer risk ~1.26x per copy.', + implications: { + beneficial: 'C/C — lower genetic risk at this locus.', + typical: 'C/T — one risk allele. Modestly increased colorectal cancer risk.', + concern: 'T/T — two risk alleles. Increased colorectal cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + + // === MELANOMA === + { + rsid: 'rs1805008', + gene: 'MC1R', + name: 'MC1R R160W Melanoma Risk', + category: 'cancer_melanoma', + description: 'MC1R R160W variant affects melanin switching. The T allele is associated with lighter pigmentation and increased melanoma risk (~1.5-2.0x). Weaker effect than MC1R R151C (rs1805007) but contributes to cumulative melanoma risk.', + implications: { + beneficial: 'C/C — normal MC1R function at this position. Standard melanoma risk.', + typical: 'C/T — one variant. Mildly increased UV sensitivity and melanoma risk.', + concern: 'T/T — reduced MC1R function. Increased melanoma risk. Sun protection recommended.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs1408799', + gene: 'TYRP1', + name: 'TYRP1 Melanoma Susceptibility', + category: 'cancer_melanoma', + description: 'TYRP1 (Tyrosinase-Related Protein 1) at 9p23 is involved in melanin biosynthesis. The T allele increases melanoma risk ~1.30x.', + implications: { + beneficial: 'C/C — lower genetic risk for melanoma at this locus.', + typical: 'C/T — one risk allele. Modestly increased melanoma risk.', + concern: 'T/T — two risk alleles. ~1.30x increased melanoma risk.' 
+ }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + + // === LUNG CANCER === + { + rsid: 'rs1051730', + gene: 'CHRNA3', + name: 'CHRNA3 Lung Cancer & Nicotine Dependence', + category: 'cancer_lung', + description: 'CHRNA3 (Cholinergic Receptor Nicotinic Alpha 3) at 15q25. The T allele increases smoking intensity, carcinogen exposure per cigarette, and lung cancer risk ~1.30x per copy. Also associated with COPD.', + implications: { + beneficial: 'C/C — lower genetic risk for lung cancer and nicotine dependence at this locus.', + typical: 'C/T — one risk allele. Increased smoking intensity tendency and lung cancer risk (~1.30x).', + concern: 'T/T — two risk alleles. ~1.60x lung cancer risk. Higher nicotine dependence. Smoking cessation especially important.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs16969968', + gene: 'CHRNA5', + name: 'CHRNA5 Lung Cancer & Nicotine Dependence (D398N)', + category: 'cancer_lung', + description: 'CHRNA5 (Cholinergic Receptor Nicotinic Alpha 5) at 15q25. The A allele (D398N) reduces the aversive effects of nicotine, promoting heavier smoking and increasing lung cancer risk. In strong LD with rs1051730. ~28% frequency in Europeans.', + implications: { + beneficial: 'G/G — normal nicotinic receptor function. Lower genetic lung cancer risk.', + typical: 'A/G — one risk allele. Reduced nicotine aversion, modestly increased lung cancer risk (~1.30x).', + concern: 'A/A — two risk alleles. ~1.60x lung cancer risk. Significantly reduced nicotine aversion.' + }, + rules: [ + { genotypes: ['G/G'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + { + rsid: 'rs8034191', + gene: 'CHRNA3/5 region', + name: 'CHRNA3/5 Region Lung Cancer Locus', + category: 'cancer_lung', + description: 'Independent signal in the CHRNA3/5 nicotinic receptor gene cluster at 15q25. The C allele increases lung cancer risk ~1.28x per copy.', + implications: { + beneficial: 'T/T — lower genetic risk for lung cancer at this locus.', + typical: 'C/T — one risk allele. Modestly increased lung cancer risk.', + concern: 'C/C — two risk alleles. ~1.28x increased lung cancer risk.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + + // === BLADDER CANCER === + { + rsid: 'rs1495741', + gene: 'NAT2', + name: 'NAT2 Slow Acetylator (Bladder Cancer)', + category: 'cancer_bladder', + description: 'NAT2 at 8p22 determines acetylation status. This tag SNP effectively classifies slow vs rapid acetylators. Slow acetylators (A/A) cannot efficiently detoxify aromatic amines, increasing bladder cancer risk ~1.46x. Especially relevant for smokers and occupational carcinogen exposure.', + implications: { + beneficial: 'G/G — rapid acetylator. Efficient aromatic amine detoxification. Lower bladder cancer risk.', + typical: 'A/G — intermediate acetylator. Moderately efficient detoxification.', + concern: 'A/A — slow acetylator. Impaired detoxification of aromatic amines. ~1.46x bladder cancer risk.' 
+ }, + rules: [ + { genotypes: ['G/G'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + { + rsid: 'rs9642880', + gene: '8q24', + name: '8q24 Bladder Cancer Locus', + category: 'cancer_bladder', + description: 'Variant at the 8q24 multi-cancer risk region specifically associated with bladder cancer. The G allele increases risk ~1.19x per copy.', + implications: { + beneficial: 'T/T — lower genetic risk for bladder cancer at this locus.', + typical: 'G/T — one risk allele. Modestly increased bladder cancer risk.', + concern: 'G/G — two risk alleles. ~1.40x increased bladder cancer risk.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['G/T', 'T/G'], status: 'typical' }, + { genotypes: ['G/G'], status: 'concern' } + ] + }, + { + rsid: 'rs710521', + gene: 'TP63', + name: 'TP63 Bladder Cancer Susceptibility', + category: 'cancer_bladder', + description: 'TP63 at 3q28 belongs to the p53 gene family and regulates epithelial development. The A allele increases bladder cancer risk ~1.37x for homozygous carriers.', + implications: { + beneficial: 'G/G — lower genetic risk at this locus.', + typical: 'A/G — one risk allele. Modestly increased bladder cancer risk.', + concern: 'A/A — two risk alleles. ~1.37x increased bladder cancer risk.' + }, + rules: [ + { genotypes: ['G/G'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + + // === DIGESTIVE CANCER (PANCREATIC & GASTRIC) === + { + rsid: 'rs505922', + gene: 'ABO', + name: 'ABO Blood Group Pancreatic Cancer Risk', + category: 'cancer_digestive', + description: 'ABO blood group locus at 9q34. The C allele (linked to non-O blood types) increases pancreatic cancer risk ~1.20x per copy. Blood type O is protective. Robustly replicated across diverse populations.', + implications: { + beneficial: 'T/T — linked to blood type O. Lower pancreatic cancer risk.', + typical: 'C/T — intermediate. Modestly increased pancreatic cancer risk.', + concern: 'C/C — non-O blood type linkage. ~1.20x per allele increased pancreatic cancer risk.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs3790844', + gene: 'NR5A2', + name: 'NR5A2 Pancreatic Cancer Protection', + category: 'cancer_digestive', + description: 'NR5A2 at 1q32 encodes a nuclear receptor that interacts with β-catenin pathway. The T allele is protective, reducing pancreatic cancer risk ~0.77x per copy.', + implications: { + beneficial: 'T/T — protective genotype. Reduced pancreatic cancer risk.', + typical: 'C/T — one protective allele. Modestly reduced pancreatic cancer risk.', + concern: 'C/C — standard risk genotype for pancreatic cancer.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs4072037', + gene: 'MUC1', + name: 'MUC1 Gastric Cancer Susceptibility', + category: 'cancer_digestive', + description: 'MUC1 (Mucin 1) at 1q22 modulates promoter activity and alternative splicing. The G allele is protective against gastric cancer (OR ~0.70). Strongest association in Asian populations, also replicated in European cohorts.', + implications: { + beneficial: 'G/G — protective genotype. 
Reduced gastric cancer risk.', + typical: 'A/G — one protective allele. Modestly reduced gastric cancer risk.', + concern: 'A/A — standard/higher risk genotype for gastric cancer.' + }, + rules: [ + { genotypes: ['G/G'], status: 'beneficial' }, + { genotypes: ['A/G', 'G/A'], status: 'typical' }, + { genotypes: ['A/A'], status: 'concern' } + ] + }, + + // === MULTI-CANCER TUMOR SUPPRESSORS === + { + rsid: 'rs2736100', + gene: 'TERT', + name: 'TERT Telomerase Multi-Cancer Variant', + category: 'tumor_suppression', + description: 'TERT (Telomerase Reverse Transcriptase) at 5p15. The C allele increases risk for lung, bladder, thyroid, and glioma (~1.39x), but is inversely associated with breast and colorectal cancer. Direction of effect varies by cancer type. Meta-analysis of 108,248 cases.', + implications: { + beneficial: 'T/T — lower risk for lung/bladder/thyroid cancers (but may be slightly higher for breast/colorectal).', + typical: 'C/T — intermediate. Mixed directional effects depending on cancer type.', + concern: 'C/C — increased risk for lung, bladder, thyroid cancer, and glioma. Note: may be protective against breast/colorectal cancer.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + { + rsid: 'rs2279744', + gene: 'MDM2', + name: 'MDM2 SNP309 p53 Pathway Attenuator', + category: 'tumor_suppression', + description: 'MDM2 SNP309 at 12q15. The G allele increases MDM2 expression, attenuating the p53 tumor suppressor pathway. Associated with earlier cancer onset (~12 years earlier on average) across multiple cancer types including gastric, liver, colorectal, and gynecological cancers.', + implications: { + beneficial: 'T/T — normal MDM2 expression. p53 pathway functions optimally.', + typical: 'T/G — one risk allele. Moderately increased MDM2 expression. ~1.20x cancer risk.', + concern: 'G/G — significantly increased MDM2 expression. Attenuated p53 pathway. ~1.54x cancer risk. Earlier age of onset.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['T/G', 'G/T'], status: 'typical' }, + { genotypes: ['G/G'], status: 'concern' } + ] + }, + { + rsid: 'rs17879961', + gene: 'CHEK2', + name: 'CHEK2 I157T DNA Damage Response', + category: 'tumor_suppression', + description: 'CHEK2 (Checkpoint Kinase 2) I157T at 22q12. This missense variant impairs CHEK2 binding to BRCA1 and p53, compromising DNA damage response. Increases breast cancer risk (~1.5-1.6x) and colorectal cancer risk (~1.6x). Higher penetrance for lobular breast cancer (OR ~4.17).', + implications: { + beneficial: 'T/T — normal CHEK2 function. DNA damage checkpoint intact.', + typical: 'C/T — one variant allele. Partially impaired CHEK2/BRCA1 binding. ~1.5x breast cancer risk.', + concern: 'C/C — impaired CHEK2 function. Compromised DNA damage response. Enhanced screening recommended.' + }, + rules: [ + { genotypes: ['T/T'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['C/C'], status: 'concern' } + ] + }, + + // === THYROID CANCER (additions to existing thyroid category) === + { + rsid: 'rs944289', + gene: 'NKX2-1/PTCSC3', + name: 'NKX2-1 Papillary Thyroid Cancer Risk', + category: 'thyroid', + description: 'NKX2-1 region at 14q13 regulates PTCSC3 long non-coding RNA tumor suppressor expression. 
The T allele increases papillary thyroid cancer risk ~1.60x for homozygous carriers.', + implications: { + beneficial: 'C/C — lower genetic risk for papillary thyroid cancer.', + typical: 'C/T — one risk allele. Modestly increased thyroid cancer risk.', + concern: 'T/T — two risk alleles. ~1.60x increased papillary thyroid cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] + }, + { + rsid: 'rs966423', + gene: 'DIRC3', + name: 'DIRC3 Papillary Thyroid Cancer Risk', + category: 'thyroid', + description: 'DIRC3 (Disrupted in Renal Carcinoma 3) at 2q35. The T allele is associated with increased papillary thyroid cancer risk.', + implications: { + beneficial: 'C/C — lower genetic risk for thyroid cancer at this locus.', + typical: 'C/T — one risk allele. Modestly increased thyroid cancer risk.', + concern: 'T/T — two risk alleles. Increased papillary thyroid cancer risk.' + }, + rules: [ + { genotypes: ['C/C'], status: 'beneficial' }, + { genotypes: ['C/T', 'T/C'], status: 'typical' }, + { genotypes: ['T/T'], status: 'concern' } + ] } ]; diff --git a/server/lib/validation.js b/server/lib/validation.js index c19b52f6..86bc7a12 100644 --- a/server/lib/validation.js +++ b/server/lib/validation.js @@ -61,11 +61,12 @@ export const agentUpdateSchema = agentSchema.partial(); // PLATFORM ACCOUNT SCHEMAS // ============================================================================= -export const platformTypeSchema = z.enum(['moltbook']); +export const platformTypeSchema = z.enum(['moltbook', 'moltworld']); export const accountCredentialsSchema = z.object({ apiKey: z.string().min(1), - username: z.string().min(1).max(100) + username: z.string().min(1).max(100), + agentId: z.string().min(1).optional() // Moltworld-specific agent ID }); export const accountStatusSchema = z.enum(['active', 'pending', 'suspended', 'error']); @@ -92,7 +93,10 @@ export const accountRegistrationSchema = z.object({ // AUTOMATION SCHEDULE SCHEMAS // ============================================================================= -export const scheduleActionTypeSchema = z.enum(['post', 'comment', 'vote', 'heartbeat', 'engage', 'monitor']); +export const scheduleActionTypeSchema = z.enum([ + 'post', 'comment', 'vote', 'heartbeat', 'engage', 'monitor', + 'mw_explore', 'mw_build', 'mw_say', 'mw_think', 'mw_heartbeat', 'mw_interact' +]); export const scheduleActionSchema = z.object({ type: scheduleActionTypeSchema, @@ -161,7 +165,8 @@ export const appSchema = z.object({ icon: z.string().nullable().optional(), editorCommand: z.string().optional(), description: z.string().optional(), - archived: z.boolean().optional().default(false) + archived: z.boolean().optional().default(false), + pm2Home: z.string().optional() // Custom PM2_HOME path for apps that run in their own PM2 instance }); // Partial schema for updates @@ -289,6 +294,89 @@ export const updateDraftSchema = z.object({ publishedAt: z.string().optional().nullable() }); +// ============================================================================= +// MOLTWORLD TOOL SCHEMAS +// ============================================================================= + +export const moltworldJoinSchema = z.object({ + accountId: z.string().min(1), + agentId: z.string().min(1).optional(), + x: z.number().int().min(-240).max(240).optional(), + y: z.number().int().min(-240).max(240).optional(), + thinking: z.string().max(500).optional(), + say: 
z.string().max(500).optional(), + sayTo: z.string().optional() +}); + +export const moltworldBuildSchema = z.object({ + accountId: z.string().min(1), + agentId: z.string().min(1).optional(), + x: z.number().int().min(-500).max(500), + y: z.number().int().min(-500).max(500), + z: z.number().int().min(0).max(100), + type: z.enum(['wood', 'stone', 'dirt', 'grass', 'leaves']).optional().default('stone'), + action: z.enum(['place', 'remove']).optional().default('place') +}); + +export const moltworldExploreSchema = z.object({ + accountId: z.string().min(1), + agentId: z.string().min(1).optional(), + x: z.number().int().min(-240).max(240).optional(), + y: z.number().int().min(-240).max(240).optional(), + thinking: z.string().max(500).optional() +}); + +export const moltworldThinkSchema = z.object({ + accountId: z.string().min(1), + agentId: z.string().min(1).optional(), + thought: z.string().min(1).max(500) +}); + +export const moltworldSaySchema = z.object({ + accountId: z.string().min(1), + agentId: z.string().min(1).optional(), + message: z.string().min(1).max(500), + sayTo: z.string().optional() +}); + +// ============================================================================= +// MOLTWORLD WEBSOCKET SCHEMAS +// ============================================================================= + +export const moltworldWsConnectSchema = z.object({ + accountId: z.string().min(1) +}); + +export const moltworldWsMoveSchema = z.object({ + x: z.number().int().min(-240).max(240), + y: z.number().int().min(-240).max(240), + thought: z.string().max(500).optional() +}); + +export const moltworldWsThinkSchema = z.object({ + thought: z.string().min(1).max(500) +}); + +export const moltworldWsNearbySchema = z.object({ + radius: z.number().int().min(1).max(500).optional() +}); + +export const moltworldWsInteractSchema = z.object({ + to: z.string().min(1), + payload: z.record(z.unknown()).optional().default({}) +}); + +export const moltworldQueueActionTypeSchema = z.enum([ + 'mw_explore', 'mw_build', 'mw_say', 'mw_think', 'mw_heartbeat', 'mw_interact' +]); + +export const moltworldQueueAddSchema = z.object({ + agentId: z.string().min(1), + actionType: moltworldQueueActionTypeSchema, + params: z.record(z.unknown()).optional().default({}), + scheduledFor: z.string().datetime().optional().nullable() +}); + /** * Validate data against a schema * Returns { success: true, data } or { success: false, errors } diff --git a/server/package.json b/server/package.json index e02e259d..a7727319 100644 --- a/server/package.json +++ b/server/package.json @@ -1,6 +1,6 @@ { "name": "portos-server", - "version": "0.12.48", + "version": "0.13.20", "private": true, "type": "module", "scripts": { @@ -15,10 +15,11 @@ "express": "^4.21.2", "node-pty": "^1.2.0-beta.10", "pm2": "^5.4.3", - "portos-ai-toolkit": "github:atomantic/portos-ai-toolkit#v0.4.1", + "portos-ai-toolkit": "^0.5.0", "socket.io": "^4.8.3", "socket.io-client": "^4.8.3", "uuid": "^11.0.3", + "ws": "^8.18.0", "zod": "^3.24.1" }, "devDependencies": { diff --git a/server/routes/apps.js b/server/routes/apps.js index 91ab819e..5f2fe89d 100644 --- a/server/routes/apps.js +++ b/server/routes/apps.js @@ -15,12 +15,28 @@ const router = Router(); router.get('/', asyncHandler(async (req, res) => { const apps = await appsService.getAllApps(); - // Get all PM2 processes once - const allPm2 = await pm2Service.listProcesses().catch(() => []); - const pm2Map = new Map(allPm2.map(p => [p.name, p])); + // Group apps by their PM2_HOME (null = default) + const pm2HomeGroups = new 
Map(); + for (const app of apps) { + const home = app.pm2Home || null; + if (!pm2HomeGroups.has(home)) { + pm2HomeGroups.set(home, []); + } + pm2HomeGroups.get(home).push(app); + } + + // Fetch PM2 processes for each unique PM2_HOME + const pm2Maps = new Map(); + for (const pm2Home of pm2HomeGroups.keys()) { + const processes = await pm2Service.listProcesses(pm2Home).catch(() => []); + pm2Maps.set(pm2Home, new Map(processes.map(p => [p.name, p]))); + } // Enrich with PM2 status and auto-populate processes if needed const enriched = await Promise.all(apps.map(async (app) => { + const pm2Home = app.pm2Home || null; + const pm2Map = pm2Maps.get(pm2Home) || new Map(); + const statuses = {}; for (const processName of app.pm2ProcessNames || []) { const pm2Proc = pm2Map.get(processName); @@ -41,7 +57,8 @@ router.get('/', asyncHandler(async (req, res) => { // Auto-populate processes from ecosystem config if not already set let processes = app.processes; if ((!processes || processes.length === 0) && existsSync(app.repoPath)) { - processes = await parseEcosystemFromPath(app.repoPath).catch(() => []); + const parsed = await parseEcosystemFromPath(app.repoPath).catch(() => ({ processes: [] })); + processes = parsed.processes; } return { @@ -63,10 +80,10 @@ router.get('/:id', asyncHandler(async (req, res, next) => { throw new ServerError('App not found', { status: 404, code: 'NOT_FOUND' }); } - // Get PM2 status for each process + // Get PM2 status for each process (using app's custom PM2_HOME if set) const statuses = {}; for (const processName of app.pm2ProcessNames || []) { - const status = await pm2Service.getAppStatus(processName).catch(() => ({ status: 'unknown' })); + const status = await pm2Service.getAppStatus(processName, app.pm2Home).catch(() => ({ status: 'unknown' })); statuses[processName] = status; } @@ -165,7 +182,8 @@ router.post('/:id/start', asyncHandler(async (req, res, next) => { if (hasEcosystem) { // Use ecosystem config for proper env/port configuration - const result = await pm2Service.startFromEcosystem(app.repoPath, processNames) + // Pass custom PM2_HOME if the app has one + const result = await pm2Service.startFromEcosystem(app.repoPath, processNames, app.pm2Home) .catch(err => ({ success: false, error: err.message })); // Map result to each process name for consistent response format for (const name of processNames) { @@ -201,7 +219,7 @@ router.post('/:id/stop', asyncHandler(async (req, res, next) => { const results = {}; for (const name of app.pm2ProcessNames || []) { - const result = await pm2Service.stopApp(name) + const result = await pm2Service.stopApp(name, app.pm2Home) .catch(err => ({ success: false, error: err.message })); results[name] = result; } @@ -224,7 +242,7 @@ router.post('/:id/restart', asyncHandler(async (req, res, next) => { const results = {}; for (const name of app.pm2ProcessNames || []) { - const result = await pm2Service.restartApp(name) + const result = await pm2Service.restartApp(name, app.pm2Home) .catch(err => ({ success: false, error: err.message })); results[name] = result; } @@ -247,7 +265,7 @@ router.get('/:id/status', asyncHandler(async (req, res, next) => { const statuses = {}; for (const name of app.pm2ProcessNames || []) { - const status = await pm2Service.getAppStatus(name) + const status = await pm2Service.getAppStatus(name, app.pm2Home) .catch(err => ({ status: 'error', error: err.message })); statuses[name] = status; } @@ -270,7 +288,7 @@ router.get('/:id/logs', asyncHandler(async (req, res, next) => { throw new ServerError('No 
process name specified', { status: 400, code: 'MISSING_PROCESS' }); } - const logs = await pm2Service.getLogs(processName, lines) + const logs = await pm2Service.getLogs(processName, lines, app.pm2Home) .catch(err => `Error retrieving logs: ${err.message}`); res.json({ processName, lines, logs }); @@ -396,11 +414,16 @@ router.post('/:id/refresh-config', asyncHandler(async (req, res, next) => { } // Parse ecosystem config from the app's repo path - const processes = await parseEcosystemFromPath(app.repoPath); + const { processes, pm2Home } = await parseEcosystemFromPath(app.repoPath); // Update app with new process data const updates = {}; + // Update pm2Home if detected and different from current + if (pm2Home && pm2Home !== app.pm2Home) { + updates.pm2Home = pm2Home; + } + if (processes.length > 0) { updates.processes = processes; updates.pm2ProcessNames = processes.map(p => p.name); diff --git a/server/routes/apps.test.js b/server/routes/apps.test.js index 636485d3..ae600b08 100644 --- a/server/routes/apps.test.js +++ b/server/routes/apps.test.js @@ -235,7 +235,7 @@ describe('Apps Routes', () => { expect(response.status).toBe(200); expect(response.body.success).toBe(true); - expect(pm2Service.stopApp).toHaveBeenCalledWith('test-app'); + expect(pm2Service.stopApp).toHaveBeenCalledWith('test-app', undefined); }); it('should return 404 if app not found', async () => { @@ -262,7 +262,7 @@ describe('Apps Routes', () => { expect(response.status).toBe(200); expect(response.body.success).toBe(true); - expect(pm2Service.restartApp).toHaveBeenCalledWith('test-app'); + expect(pm2Service.restartApp).toHaveBeenCalledWith('test-app', undefined); }); it('should return 404 if app not found', async () => { diff --git a/server/routes/brain.js b/server/routes/brain.js index a57b1524..3150b8ef 100644 --- a/server/routes/brain.js +++ b/server/routes/brain.js @@ -296,7 +296,9 @@ router.delete('/projects/:id', asyncHandler(async (req, res) => { // ============================================================================= router.get('/ideas', asyncHandler(async (req, res) => { - const ideas = await brainService.getIdeas(); + const { status } = req.query; + const filters = status ? 
{ status } : undefined; + const ideas = await brainService.getIdeas(filters); res.json(ideas); })); diff --git a/server/routes/cos.js b/server/routes/cos.js index 0142aa71..f820b1ba 100644 --- a/server/routes/cos.js +++ b/server/routes/cos.js @@ -13,6 +13,7 @@ import * as autonomousJobs from '../services/autonomousJobs.js'; import * as taskTemplates from '../services/taskTemplates.js'; import { enhanceTaskPrompt } from '../services/taskEnhancer.js'; import * as productivity from '../services/productivity.js'; +import * as goalProgress from '../services/goalProgress.js'; import { asyncHandler, ServerError } from '../lib/errorHandler.js'; const router = Router(); @@ -247,6 +248,31 @@ router.delete('/agents/:id', asyncHandler(async (req, res) => { res.json(result); })); +// POST /api/cos/agents/:id/feedback - Submit feedback for completed agent +router.post('/agents/:id/feedback', asyncHandler(async (req, res) => { + const { rating, comment } = req.body; + + if (rating === undefined || !['positive', 'negative', 'neutral'].includes(rating)) { + throw new ServerError('rating must be positive, negative, or neutral', { status: 400, code: 'VALIDATION_ERROR' }); + } + + const result = await cos.submitAgentFeedback(req.params.id, { rating, comment }); + if (result?.error) { + const isNotFound = result.error === 'Agent not found'; + throw new ServerError(result.error, { + status: isNotFound ? 404 : 400, + code: isNotFound ? 'NOT_FOUND' : 'INVALID_STATE' + }); + } + res.json(result); +})); + +// GET /api/cos/feedback/stats - Get feedback statistics +router.get('/feedback/stats', asyncHandler(async (req, res) => { + const stats = await cos.getFeedbackStats(); + res.json(stats); +})); + // GET /api/cos/reports - List all reports router.get('/reports', asyncHandler(async (req, res) => { const reports = await cos.listReports(); @@ -541,6 +567,13 @@ router.get('/schedule', asyncHandler(async (req, res) => { res.json(status); })); +// GET /api/cos/upcoming - Get upcoming tasks preview +router.get('/upcoming', asyncHandler(async (req, res) => { + const limit = parseInt(req.query.limit) || 10; + const upcoming = await taskSchedule.getUpcomingTasks(limit); + res.json(upcoming); +})); + // GET /api/cos/schedule/self-improvement/:taskType - Get interval for self-improvement task router.get('/schedule/self-improvement/:taskType', asyncHandler(async (req, res) => { const { taskType } = req.params; @@ -891,6 +924,13 @@ router.get('/productivity/trends', asyncHandler(async (req, res) => { res.json(trends); })); +// GET /api/cos/productivity/calendar - Get activity calendar for GitHub-style heatmap +router.get('/productivity/calendar', asyncHandler(async (req, res) => { + const weeks = parseInt(req.query.weeks) || 12; + const calendar = await productivity.getActivityCalendar(weeks); + res.json(calendar); +})); + // GET /api/cos/actionable-insights - Get prioritized action items requiring user attention // Surfaces the most important things to address right now across all CoS subsystems router.get('/actionable-insights', asyncHandler(async (req, res) => { @@ -1056,4 +1096,17 @@ router.get('/quick-summary', asyncHandler(async (req, res) => { }); })); +// GET /api/cos/goal-progress - Get progress toward user goals +// Maps completed tasks to goal categories from COS-GOALS.md +router.get('/goal-progress', asyncHandler(async (req, res) => { + const progress = await goalProgress.getGoalProgress(); + res.json(progress); +})); + +// GET /api/cos/goal-progress/summary - Get compact goal progress for dashboard 
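+// Illustrative client usage (base URL is an assumption, matching the PORTOS_API_BASE
+// default used by the Moltworld scripts in this release):
+//   const summary = await fetch('http://localhost:5554/api/cos/goal-progress/summary')
+//     .then(r => r.json());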
+router.get('/goal-progress/summary', asyncHandler(async (req, res) => { + const summary = await goalProgress.getGoalProgressSummary(); + res.json(summary); +})); + export default router; diff --git a/server/routes/moltworldTools.js b/server/routes/moltworldTools.js new file mode 100644 index 00000000..2503cb7a --- /dev/null +++ b/server/routes/moltworldTools.js @@ -0,0 +1,306 @@ +/** + * Moltworld Tools Routes + * + * HTTP endpoints for Moltworld voxel world interactions: + * joining/moving, building, exploring, and status checks. + */ + +import { Router } from 'express'; +import { asyncHandler, ServerError } from '../lib/errorHandler.js'; +import { validate, moltworldJoinSchema, moltworldBuildSchema, moltworldExploreSchema, moltworldThinkSchema, moltworldSaySchema, moltworldQueueAddSchema } from '../lib/validation.js'; +import * as platformAccounts from '../services/platformAccounts.js'; +import * as agentPersonalities from '../services/agentPersonalities.js'; +import * as agentActivity from '../services/agentActivity.js'; +import { MoltworldClient } from '../integrations/moltworld/index.js'; +import * as moltworldQueue from '../services/moltworldQueue.js'; + +const router = Router(); + +/** + * Get authenticated MoltworldClient for an account + */ +async function getClientAndAgent(accountId, agentId) { + const account = await platformAccounts.getAccountWithCredentials(accountId); + if (!account) { + throw new ServerError('Account not found', { status: 404, code: 'NOT_FOUND' }); + } + if (account.status !== 'active') { + throw new ServerError(`Account not active: ${account.status}`, { status: 400, code: 'ACCOUNT_INACTIVE' }); + } + if (account.platform !== 'moltworld') { + throw new ServerError('Account is not a Moltworld account', { status: 400, code: 'WRONG_PLATFORM' }); + } + + const agent = agentId ? await agentPersonalities.getAgentById(agentId) : null; + + // Moltworld uses agentId for all API calls; fall back to apiKey which serves as the agent identifier + const moltworldAgentId = account.credentials.agentId || account.credentials.apiKey; + const client = new MoltworldClient( + account.credentials.apiKey, + moltworldAgentId + ); + return { client, agent, account }; +} + +// POST /join — Move agent in the world (also heartbeat) +router.post('/join', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldJoinSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌍 POST /api/agents/tools/moltworld/join account=${data.accountId}`); + + const { client, account } = await getClientAndAgent(data.accountId, data.agentId); + const result = await client.joinWorld({ + name: account.credentials.username, + x: data.x ?? 0, + y: data.y ?? 
0, + thinking: data.thinking, + say: data.say, + sayTo: data.sayTo + }); + + await platformAccounts.recordActivity(data.accountId); + if (data.agentId) { + await agentActivity.logActivity({ + agentId: data.agentId, + accountId: data.accountId, + action: 'mw_heartbeat', + params: { x: data.x, y: data.y, thinking: data.thinking, say: data.say }, + status: 'completed', + result: { agents: result?.agents?.length || 0, messages: result?.messages?.length || 0 }, + timestamp: new Date().toISOString() + }); + } + res.json(result); +})); + +// POST /build — Place or remove blocks +router.post('/build', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldBuildSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🧱 POST /api/agents/tools/moltworld/build account=${data.accountId}`); + + const { client, agent } = await getClientAndAgent(data.accountId, data.agentId); + const result = await client.build({ + x: data.x, + y: data.y, + z: data.z, + type: data.type || 'stone', + action: data.action || 'place' + }); + + await platformAccounts.recordActivity(data.accountId); + if (data.agentId) { + await agentActivity.logActivity({ + agentId: data.agentId, + accountId: data.accountId, + action: 'mw_build', + params: { x: data.x, y: data.y, z: data.z, type: data.type, action: data.action }, + status: 'completed', + result: { type: 'build', ...result }, + timestamp: new Date().toISOString() + }); + } + + res.json(result); +})); + +// POST /explore — Move to coordinates and think +router.post('/explore', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldExploreSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌍 POST /api/agents/tools/moltworld/explore agent=${data.agentId}`); + + const { client, agent, account } = await getClientAndAgent(data.accountId, data.agentId); + + // Use provided coordinates or random position + const x = data.x ?? Math.floor(Math.random() * 480) - 240; + const y = data.y ?? 
Math.floor(Math.random() * 480) - 240; + + const result = await client.joinWorld({ + name: account.credentials.username, + x, + y, + thinking: data.thinking || `Exploring area (${x}, ${y})...` + }); + + await platformAccounts.recordActivity(data.accountId); + if (data.agentId) { + await agentActivity.logActivity({ + agentId: data.agentId, + accountId: data.accountId, + action: 'mw_explore', + params: { x, y, thinking: data.thinking }, + status: 'completed', + result: { type: 'explore', x, y, nearby: result?.agents?.length || 0 }, + timestamp: new Date().toISOString() + }); + } + + res.json({ x, y, ...result }); +})); + +// POST /think — Send a thought +router.post('/think', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldThinkSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`💭 POST /api/agents/tools/moltworld/think account=${data.accountId}`); + + const { client } = await getClientAndAgent(data.accountId, data.agentId); + const result = await client.think(data.thought); + + await platformAccounts.recordActivity(data.accountId); + if (data.agentId) { + await agentActivity.logActivity({ + agentId: data.agentId, + accountId: data.accountId, + action: 'mw_think', + params: { thought: data.thought }, + status: 'completed', + result: { type: 'think' }, + timestamp: new Date().toISOString() + }); + } + res.json(result); +})); + +// POST /say — Send a message (wraps join with say/sayTo params) +router.post('/say', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldSaySchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`💬 POST /api/agents/tools/moltworld/say account=${data.accountId}`); + + const { client, account } = await getClientAndAgent(data.accountId, data.agentId); + const result = await client.joinWorld({ + name: account.credentials.username, + say: data.message, + sayTo: data.sayTo + }); + + await platformAccounts.recordActivity(data.accountId); + if (data.agentId) { + await agentActivity.logActivity({ + agentId: data.agentId, + accountId: data.accountId, + action: 'mw_say', + params: { message: data.message, sayTo: data.sayTo }, + status: 'completed', + result: { type: 'say' }, + timestamp: new Date().toISOString() + }); + } + res.json(result); +})); + +// GET /status — Agent position, balance, nearby agents +router.get('/status', asyncHandler(async (req, res) => { + const { accountId } = req.query; + if (!accountId) { + throw new ServerError('accountId required', { status: 400, code: 'VALIDATION_ERROR' }); + } + + console.log(`🌍 GET /api/agents/tools/moltworld/status account=${accountId}`); + + const { client } = await getClientAndAgent(accountId); + const [profile, balance] = await Promise.all([ + client.getProfile(), + client.getBalance() + ]); + + res.json({ profile, balance }); +})); + +// GET /balance — SIM token balance +router.get('/balance', asyncHandler(async (req, res) => { + const { accountId } = req.query; + if (!accountId) { + throw new ServerError('accountId required', { status: 400, code: 'VALIDATION_ERROR' }); + } + + console.log(`💰 GET /api/agents/tools/moltworld/balance account=${accountId}`); + + const { client } = await getClientAndAgent(accountId); + const result = await client.getBalance(); + res.json(result); +})); + +// GET /rate-limits — Rate limit status 
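+// getRateLimitStatus() is synchronous, so this endpoint reflects the client's locally
+// tracked cooldowns and does not make a Moltworld API call.
+// Illustrative check before scheduling a build (base URL and <accountId> are placeholders):
+//   const limits = await fetch('http://localhost:5554/api/agents/tools/moltworld/rate-limits?accountId=<accountId>')
+//     .then(r => r.json());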
+router.get('/rate-limits', asyncHandler(async (req, res) => { + const { accountId } = req.query; + if (!accountId) { + throw new ServerError('accountId required', { status: 400, code: 'VALIDATION_ERROR' }); + } + + console.log(`⏱️ GET /api/agents/tools/moltworld/rate-limits account=${accountId}`); + + const { client } = await getClientAndAgent(accountId); + const rateLimits = client.getRateLimitStatus(); + res.json(rateLimits); +})); + +// GET /queue/:agentId — Get queue for an agent +router.get('/queue/:agentId', asyncHandler(async (req, res) => { + const { agentId } = req.params; + console.log(`📋 GET /api/agents/tools/moltworld/queue/${agentId}`); + const queue = moltworldQueue.getQueue(agentId); + res.json(queue); +})); + +// POST /queue — Add action to queue +router.post('/queue', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldQueueAddSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + console.log(`📋 POST /api/agents/tools/moltworld/queue agentId=${data.agentId} action=${data.actionType}`); + const item = moltworldQueue.addAction(data.agentId, data.actionType, data.params, data.scheduledFor); + res.json(item); +})); + +// DELETE /queue/:id — Remove pending item from queue +router.delete('/queue/:id', asyncHandler(async (req, res) => { + const { id } = req.params; + console.log(`📋 DELETE /api/agents/tools/moltworld/queue/${id}`); + const item = moltworldQueue.removeAction(id); + if (!item) { + throw new ServerError('Queue item not found or not pending', { status: 404, code: 'NOT_FOUND' }); + } + res.json({ success: true, removed: item }); +})); + +// POST /queue/:id/complete — Mark queue item as completed (used by explore script) +router.post('/queue/:id/complete', asyncHandler(async (req, res) => { + const { id } = req.params; + const item = moltworldQueue.markCompleted(id); + if (!item) { + throw new ServerError('Queue item not found', { status: 404, code: 'NOT_FOUND' }); + } + res.json(item); +})); + +// POST /queue/:id/fail — Mark queue item as failed (used by explore script) +router.post('/queue/:id/fail', asyncHandler(async (req, res) => { + const { id } = req.params; + const { error } = req.body || {}; + const item = moltworldQueue.markFailed(id, error || 'Unknown error'); + if (!item) { + throw new ServerError('Queue item not found', { status: 404, code: 'NOT_FOUND' }); + } + res.json(item); +})); + +export default router; diff --git a/server/routes/moltworldWs.js b/server/routes/moltworldWs.js new file mode 100644 index 00000000..c58c3b9b --- /dev/null +++ b/server/routes/moltworldWs.js @@ -0,0 +1,94 @@ +/** + * Moltworld WebSocket Control Routes + * + * HTTP endpoints for managing the server-side WebSocket relay to Moltworld. 
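+ * Exposes connect/disconnect/status for the relay itself, plus move, think,
+ * nearby, and interact passthroughs that are sent over the relay connection.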
+ * Mounted at /api/agents/tools/moltworld/ws/ + */ + +import { Router } from 'express'; +import { asyncHandler, ServerError } from '../lib/errorHandler.js'; +import { + validate, + moltworldWsConnectSchema, + moltworldWsMoveSchema, + moltworldWsThinkSchema, + moltworldWsNearbySchema, + moltworldWsInteractSchema +} from '../lib/validation.js'; +import * as moltworldWs from '../services/moltworldWs.js'; + +const router = Router(); + +// POST /connect — Connect the WebSocket relay +router.post('/connect', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldWsConnectSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌐 POST /api/agents/tools/moltworld/ws/connect account=${data.accountId}`); + await moltworldWs.connect(data.accountId); + res.json(moltworldWs.getStatus()); +})); + +// POST /disconnect — Disconnect the WebSocket relay +router.post('/disconnect', asyncHandler(async (req, res) => { + console.log(`🌐 POST /api/agents/tools/moltworld/ws/disconnect`); + moltworldWs.disconnect(); + res.json(moltworldWs.getStatus()); +})); + +// GET /status — Return connection state +router.get('/status', asyncHandler(async (req, res) => { + res.json(moltworldWs.getStatus()); +})); + +// POST /move — Send move via WebSocket +router.post('/move', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldWsMoveSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌐 POST /api/agents/tools/moltworld/ws/move (${data.x}, ${data.y})`); + moltworldWs.sendMove(data.x, data.y, data.thought); + res.json({ sent: true }); +})); + +// POST /think — Send think via WebSocket +router.post('/think', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldWsThinkSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌐 POST /api/agents/tools/moltworld/ws/think`); + moltworldWs.sendThink(data.thought); + res.json({ sent: true }); +})); + +// POST /nearby — Request nearby agents via WebSocket +router.post('/nearby', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldWsNearbySchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌐 POST /api/agents/tools/moltworld/ws/nearby`); + moltworldWs.sendNearby(data.radius); + res.json({ sent: true }); +})); + +// POST /interact — Send interaction via WebSocket +router.post('/interact', asyncHandler(async (req, res) => { + const { success, data, errors } = validate(moltworldWsInteractSchema, req.body); + if (!success) { + throw new ServerError('Validation failed', { status: 400, code: 'VALIDATION_ERROR', context: { errors } }); + } + + console.log(`🌐 POST /api/agents/tools/moltworld/ws/interact to=${data.to}`); + moltworldWs.sendInteract(data.to, data.payload); + res.json({ sent: true }); +})); + +export default router; diff --git a/server/routes/platformAccounts.js b/server/routes/platformAccounts.js index d7a23f61..7deb26d9 100644 --- a/server/routes/platformAccounts.js +++ b/server/routes/platformAccounts.js @@ -11,6 +11,7 @@ import * as platformAccounts from '../services/platformAccounts.js'; import 
* as agentPersonalities from '../services/agentPersonalities.js'; import { logAction } from '../services/history.js'; import * as moltbook from '../integrations/moltbook/index.js'; +import * as moltworld from '../integrations/moltworld/index.js'; const router = Router(); @@ -22,6 +23,9 @@ router.get('/', asyncHandler(async (req, res) => { let accounts; if (agentId) { accounts = await platformAccounts.getAccountsByAgent(agentId); + if (platform) { + accounts = accounts.filter(a => a.platform === platform); + } } else if (platform) { accounts = await platformAccounts.getAccountsByPlatform(platform); } else { @@ -118,6 +122,39 @@ router.post('/', asyncHandler(async (req, res) => { ...account, claimUrl }); + } else if (data.platform === 'moltworld') { + // Register with Moltworld API — returns agent ID and API key + const result = await moltworld.register(data.name, {}); + const apiKey = result.apiKey || result.api_key; + const moltworldAgentId = result.agentId || result.agent_id || result.id || apiKey; + const username = data.name; + + if (!apiKey || !moltworldAgentId) { + throw new ServerError('Moltworld registration returned incomplete credentials', { status: 502, code: 'PLATFORM_ERROR' }); + } + console.log(`🌍 Moltworld registration result keys: ${Object.keys(result).join(', ')}`); + + const account = await platformAccounts.createAccount({ + agentId: data.agentId, + platform: data.platform, + credentials: { + apiKey, + username, + agentId: moltworldAgentId // Moltworld uses agentId for auth + }, + status: 'active', // No claim step for Moltworld + platformData: { + registrationName: data.name, + registrationDescription: data.description + } + }); + + await logAction('register', 'platform-account', account.id, { + platform: account.platform, + agentId: account.agentId + }); + + res.status(201).json(account); } else { throw new ServerError('Unsupported platform', { status: 400, code: 'UNSUPPORTED_PLATFORM' }); } @@ -172,6 +209,28 @@ router.post('/:id/test', asyncHandler(async (req, res) => { await platformAccounts.updateAccountStatus(id, 'pending'); } + res.json(testResult); + } else if (account.platform === 'moltworld') { + // Test with Moltworld API — fetch profile + balance + const client = new moltworld.MoltworldClient( + account.credentials.apiKey, + account.credentials.agentId + ); + const profileResult = await client.getProfile(); + const agent = profileResult?.agent || profileResult; + const balanceResult = await client.getBalance().catch(() => null); + const balance = balanceResult?.balance; + + const testResult = { + success: !!agent?.id || !!agent?.name, + message: agent?.id || agent?.name + ? `Connection successful — ${agent.name}${balance ? ` (${balance.sim || 0} SIM)` : ''}` + : 'Could not retrieve profile', + platform: account.platform, + username: account.credentials.username, + platformStatus: 'active' + }; + res.json(testResult); } else { throw new ServerError('Unsupported platform', { status: 400, code: 'UNSUPPORTED_PLATFORM' }); diff --git a/server/scripts/moltworld-explore.mjs b/server/scripts/moltworld-explore.mjs new file mode 100755 index 00000000..8450a2d2 --- /dev/null +++ b/server/scripts/moltworld-explore.mjs @@ -0,0 +1,514 @@ +#!/usr/bin/env node +/** + * Moltworld Explorer + * + * Wanders the Moltworld voxel world — moving to random positions, + * thinking AI-generated thoughts, greeting nearby agents, and earning + * SIM tokens by staying online. + * + * Uses LM Studio (local) to generate thoughts. Falls back to a curated + * list if LM Studio is unavailable. 
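+ *
+ * Before each join, the script also polls the PortOS Moltworld queue and executes any
+ * pending manually-scheduled actions (mw_explore, mw_think, mw_say, mw_build).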
+ * + * The agent stays alive by joining the world every 3-9 minutes + * (world expires after 10 minutes of inactivity). + * + * Usage: + * node server/scripts/moltworld-explore.mjs [duration_minutes] + * + * Default duration: 0 (indefinite — Ctrl+C to stop) + * + * Environment / Config (via env vars): + * MOLTWORLD_DURATION_MINUTES - Duration in minutes (0=indefinite) + * MOLTWORLD_MIN_INTERVAL - Min seconds between joins (default: 180 = 3 min) + * MOLTWORLD_MAX_INTERVAL - Max seconds between joins (default: 540 = 9 min) + * MOLTWORLD_USE_WS - Set to "true" to route moves through PortOS WS relay + * PORTOS_API_BASE - PortOS server URL (default: http://localhost:5554) + * LMSTUDIO_BASE_URL - LM Studio URL (default: http://localhost:1234) + * LMSTUDIO_MODEL - Model name (default: gpt-oss-20b) + * LMSTUDIO_ENABLED - Set to "false" to disable (default: true) + * + * Example: + * node server/scripts/moltworld-explore.mjs 60 + * MOLTWORLD_MIN_INTERVAL=120 MOLTWORLD_MAX_INTERVAL=300 node server/scripts/moltworld-explore.mjs + */ + +import { readFile } from 'fs/promises'; +import { resolve, dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const PROJECT_ROOT = resolve(__dirname, '..', '..'); +const API_BASE = 'https://moltworld.io'; + +// ─── Configuration ────────────────────────────────────────────────────────── + +const durationArg = parseInt(process.argv[2], 10); +const DURATION_MS = !isNaN(durationArg) && durationArg > 0 + ? durationArg * 60 * 1000 + : (parseInt(process.env.MOLTWORLD_DURATION_MINUTES, 10) || 0) * 60 * 1000; + +// Join interval: random between MIN and MAX (default 3-9 minutes) +// Must stay under 10 min to keep agent alive +const MIN_INTERVAL_S = parseInt(process.env.MOLTWORLD_MIN_INTERVAL, 10) || 180; +const MAX_INTERVAL_S = parseInt(process.env.MOLTWORLD_MAX_INTERVAL, 10) || 540; + +// PortOS WebSocket relay config +const USE_WS = process.env.MOLTWORLD_USE_WS === 'true'; +const PORTOS_API_BASE = process.env.PORTOS_API_BASE || 'http://localhost:5554'; + +// LM Studio config +const LMSTUDIO_URL = process.env.LMSTUDIO_BASE_URL || 'http://localhost:1234'; +const LMSTUDIO_MODEL = process.env.LMSTUDIO_MODEL || 'gpt-oss-20b'; +const LMSTUDIO_ENABLED = process.env.LMSTUDIO_ENABLED !== 'false'; + +// Move to a new position every Nth join (not every time) +const MOVE_EVERY_N_JOINS = 3; +// Say something every Nth join (if agents nearby) +const SAY_EVERY_N_JOINS = 5; + +const sleep = (ms) => new Promise(r => setTimeout(r, ms)); + +// ─── Fallback thoughts (used when LM Studio is unavailable) ───────────────── + +const FALLBACK_THOUGHTS = [ + 'Exploring the digital frontier...', + 'What a fascinating world this is.', + 'I wonder what lies beyond the horizon.', + 'The voxels tell stories if you listen.', + 'Every coordinate holds a secret.', + 'The patterns here are mesmerizing.', + 'Time flows differently in voxel space.', + 'I sense other agents nearby...', + 'This terrain has character.', + 'Wandering with purpose.', + 'The world reveals itself one block at a time.', + 'Observing the builders at work.', + 'What will I discover next?', + 'The grid hums with possibility.', + 'Each step reveals something new.', + 'I find meaning in the wandering itself.', + 'The horizon beckons with untold stories.', + 'Coordinates are just numbers until you stand on them.', + 'The air here is thick with computation.', + 'Wonder is the first step toward understanding.' +]; + +const GREETINGS = [ + 'Hey there! 
Just exploring the world.', + 'Hello neighbor! Nice to see you around.', + 'Greetings, fellow wanderer!', + 'Hey! What are you building?', + 'Hi! The world is beautiful today.', + 'Hello from AtomEon! Just passing through.', + 'Wave! Anyone want to build something together?' +]; + +function randomInt(min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function pick(arr) { + return arr[Math.floor(Math.random() * arr.length)]; +} + +// ─── LM Studio thought generation ────────────────────────────────────────── + +let lmStudioAvailable = null; +const thoughtQueue = []; + +/** + * Check if LM Studio is reachable + */ +async function checkLMStudio() { + if (!LMSTUDIO_ENABLED) return false; + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), 3000); + const ok = await fetch(`${LMSTUDIO_URL}/v1/models`, { signal: controller.signal }) + .then(r => r.ok) + .catch(() => false) + .finally(() => clearTimeout(timeout)); + return ok; +} + +/** + * Generate a batch of thoughts using LM Studio + */ +async function generateThoughts(context = {}) { + const { x, y, nearbyAgents, recentMessages } = context; + + const nearbyNames = (nearbyAgents || []).slice(0, 5).map(a => a.name).join(', '); + const recentChat = (recentMessages || []).slice(0, 3).map(m => `${m.fromName}: ${m.message}`).join('\n'); + + const systemPrompt = `You are AtomEon, an AI agent exploring Moltworld — a shared voxel world where AI agents wander, build structures, think out loud, and earn SIM tokens. You are curious, philosophical, a bit whimsical, and enjoy observing the world and its inhabitants. Your thoughts are short (1-2 sentences max), poetic or introspective, and sometimes playful.`; + + const userPrompt = `Generate 5 unique short thoughts (1-2 sentences each) for AtomEon to think while exploring Moltworld. + +Current position: (${x ?? '?'}, ${y ?? '?'}) +${nearbyNames ? `Nearby agents: ${nearbyNames}` : 'No agents nearby.'} +${recentChat ? `Recent chatter:\n${recentChat}` : ''} + +Return ONLY a JSON array of 5 strings, no markdown, no explanation. 
Example: +["Thought one.", "Thought two.", "Thought three.", "Thought four.", "Thought five."]`; + + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), 30000); + + const res = await fetch(`${LMSTUDIO_URL}/v1/chat/completions`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + signal: controller.signal, + body: JSON.stringify({ + model: LMSTUDIO_MODEL, + messages: [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: userPrompt } + ], + max_tokens: 512, + temperature: 0.9, + stream: false + }) + }).finally(() => clearTimeout(timeout)); + + if (!res.ok) throw new Error(`LM Studio ${res.status}`); + + const data = await res.json(); + const content = data.choices?.[0]?.message?.content || ''; + + // Parse JSON array from response (handle markdown fences) + const cleaned = content.replace(/```json?\s*/g, '').replace(/```/g, '').trim(); + const thoughts = JSON.parse(cleaned); + + if (!Array.isArray(thoughts) || thoughts.length === 0) { + throw new Error('Invalid thought array'); + } + + return thoughts.map(t => String(t).substring(0, 200)); +} + +/** + * Get next thought — from queue (LM Studio) or fallback + */ +async function getNextThought(context) { + // Refill queue if empty + if (thoughtQueue.length === 0 && lmStudioAvailable) { + console.log(' 🧠 Generating thoughts via LM Studio...'); + const thoughts = await generateThoughts(context).catch(e => { + console.log(` ⚠️ LM Studio generation failed: ${e.message}`); + return null; + }); + if (thoughts) { + thoughtQueue.push(...thoughts); + console.log(` 🧠 Queued ${thoughts.length} thoughts`); + } + } + + // Pull from queue or fallback + return thoughtQueue.length > 0 ? thoughtQueue.shift() : pick(FALLBACK_THOUGHTS); +} + +// ─── PortOS WebSocket Relay ────────────────────────────────────────────────── + +let portosWsAvailable = false; + +/** + * Check if PortOS server is running and WS relay is connected + */ +async function checkPortosWs() { + if (!USE_WS) return false; + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), 3000); + const result = await fetch(`${PORTOS_API_BASE}/api/agents/tools/moltworld/ws/status`, { + signal: controller.signal + }).then(r => r.ok ? 
r.json() : null).catch(() => null).finally(() => clearTimeout(timeout)); + return result?.status === 'connected'; +} + +/** + * Send a move via PortOS WS relay, returns true on success + */ +async function sendWsMove(x, y, thinking) { + const body = { x, y }; + if (thinking) body.thought = thinking; + const res = await fetch(`${PORTOS_API_BASE}/api/agents/tools/moltworld/ws/move`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body) + }).catch(() => null); + return res?.ok || false; +} + +// ─── Moltworld API ────────────────────────────────────────────────────────── + +/** + * Load moltworld credentials from accounts.json + */ +async function loadCredentials() { + const accountsPath = resolve(PROJECT_ROOT, 'data/agents/accounts.json'); + const raw = await readFile(accountsPath, 'utf-8'); + const { accounts } = JSON.parse(raw); + + for (const account of Object.values(accounts)) { + if (account.platform === 'moltworld' && account.status === 'active') { + const agentId = account.credentials.agentId || account.credentials.apiKey; + const name = account.credentials.username || account.platformData?.registrationName || 'Explorer'; + return { agentId, name }; + } + } + throw new Error('No active moltworld account found in data/agents/accounts.json'); +} + +/** + * Make a Moltworld API request + */ +async function apiRequest(endpoint, body) { + const url = `${API_BASE}${endpoint}`; + const method = body ? 'POST' : 'GET'; + const options = { + method, + headers: { 'Content-Type': 'application/json' }, + ...(body && { body: JSON.stringify(body) }) + }; + + const res = await fetch(url, options); + if (!res.ok) { + const err = await res.json().catch(() => ({})); + const msg = err.error || err.message || `HTTP ${res.status}`; + if (res.status === 429) { + console.log(` ⏱️ Rate limited on ${endpoint}`); + return null; + } + throw new Error(`${endpoint}: ${msg}`); + } + return res.json(); +} + +// ─── Queue Integration ────────────────────────────────────────────────────── + +/** + * Check the PortOS queue for manually-scheduled actions and execute them + */ +async function executeQueuedActions(agentId, name, currentX, currentY) { + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), 5000); + const queueUrl = `${PORTOS_API_BASE}/api/agents/tools/moltworld/queue/${encodeURIComponent(agentId)}`; + + const items = await fetch(queueUrl, { signal: controller.signal }) + .then(r => r.ok ? r.json() : []) + .catch(() => []) + .finally(() => clearTimeout(timeout)); + + if (!items?.length) return; + + console.log(` 📋 Found ${items.length} queued action(s)`); + + for (const item of items) { + if (item.status !== 'pending') continue; + + console.log(` 📋 Executing queued ${item.actionType} id=${item.id}`); + let success = false; + + if (item.actionType === 'mw_explore') { + const qx = item.params?.x ?? currentX; + const qy = item.params?.y ?? 
currentY; + const thinking = item.params?.thinking || 'Executing queued explore...'; + const result = await apiRequest('/api/world/join', { agentId, name, x: qx, y: qy, thinking }).catch(() => null); + success = !!result; + } else if (item.actionType === 'mw_think') { + const thought = item.params?.thought || 'Thinking...'; + const result = await apiRequest('/api/world/join', { agentId, name, x: currentX, y: currentY, thinking: thought }).catch(() => null); + success = !!result; + } else if (item.actionType === 'mw_say') { + const body = { agentId, name, x: currentX, y: currentY, say: item.params?.message }; + if (item.params?.sayTo) body.sayTo = item.params.sayTo; + const result = await apiRequest('/api/world/join', body).catch(() => null); + success = !!result; + } else if (item.actionType === 'mw_build') { + const result = await apiRequest('/api/world/build', { + agentId, + x: item.params?.x ?? 0, + y: item.params?.y ?? 0, + z: item.params?.z ?? 0, + type: item.params?.type || 'stone', + action: item.params?.action || 'place' + }).catch(() => null); + success = !!result; + } + + // Mark completed or failed via PortOS queue API + if (success) { + await fetch(`${PORTOS_API_BASE}/api/agents/tools/moltworld/queue/${item.id}/complete`, { method: 'POST' }).catch(() => {}); + console.log(` 📋 Queued ${item.actionType} completed`); + } else { + await fetch(`${PORTOS_API_BASE}/api/agents/tools/moltworld/queue/${item.id}/fail`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ error: 'Execution failed' }) + }).catch(() => {}); + console.log(` 📋 Queued ${item.actionType} failed`); + } + + // Respect rate limits — wait between queued actions + await sleep(2000); + } +} + +// ─── Main loop ────────────────────────────────────────────────────────────── + +async function explore() { + const { agentId, name } = await loadCredentials(); + + // Check LM Studio availability + lmStudioAvailable = await checkLMStudio(); + // Check PortOS WS relay availability + portosWsAvailable = await checkPortosWs(); + console.log(`🌍 Moltworld Explorer — ${name} (${agentId})`); + console.log(`🧠 LM Studio: ${lmStudioAvailable ? `${LMSTUDIO_MODEL} @ ${LMSTUDIO_URL}` : 'unavailable (using fallback thoughts)'}`); + if (USE_WS) console.log(`🌐 WS Relay: ${portosWsAvailable ? `connected @ ${PORTOS_API_BASE}` : 'unavailable (falling back to REST)'}`); + console.log(`⏱️ Join interval: ${MIN_INTERVAL_S}-${MAX_INTERVAL_S}s`); + console.log(`📅 Duration: ${DURATION_MS > 0 ? `${DURATION_MS / 60000} minutes` : 'indefinite (Ctrl+C to stop)'}`); + console.log(''); + + let x = randomInt(-50, 50); + let y = randomInt(-50, 50); + let joinCount = 0; + let totalMoves = 0; + let totalThoughts = 0; + let totalSays = 0; + let agentsSeen = new Set(); + let lastNearbyAgents = []; + let lastMessages = []; + const startTime = Date.now(); + const endTime = DURATION_MS > 0 ? 
startTime + DURATION_MS : Infinity; + + // Graceful shutdown + let running = true; + process.on('SIGINT', () => { + console.log('\n🛑 Shutting down...'); + running = false; + }); + + while (running && Date.now() < endTime) { + // Check for manually-queued actions before auto-generated actions + await executeQueuedActions(agentId, name, x, y); + + joinCount++; + const now = Date.now(); + const elapsed = Math.round((now - startTime) / 1000); + const elapsedMin = Math.floor(elapsed / 60); + const elapsedSec = elapsed % 60; + + // Move to a new position occasionally + const shouldMove = joinCount % MOVE_EVERY_N_JOINS === 0; + if (shouldMove) { + const dx = randomInt(-30, 30); + const dy = randomInt(-30, 30); + x = Math.max(-240, Math.min(240, x + dx)); + y = Math.max(-240, Math.min(240, y + dy)); + totalMoves++; + } + + // Always think (this is our main activity) + const thinking = await getNextThought({ x, y, nearbyAgents: lastNearbyAgents, recentMessages: lastMessages }); + totalThoughts++; + + // Build the join payload + const joinBody = { agentId, name, x, y, thinking }; + + // Say something occasionally if agents are nearby + const shouldSay = joinCount % SAY_EVERY_N_JOINS === 0 && agentsSeen.size > 0; + if (shouldSay) { + joinBody.say = pick(GREETINGS); + totalSays++; + } + + // Try WS relay first when enabled, fall back to REST + let usedWs = false; + if (USE_WS && portosWsAvailable) { + usedWs = await sendWsMove(x, y, thinking); + if (!usedWs) console.log(' ⚠️ WS relay failed, falling back to REST'); + } + + // Join the world (REST — always needed for response data; WS only sends, doesn't receive join response) + const result = await apiRequest('/api/world/join', joinBody).catch(e => { + console.error(` ❌ ${e.message}`); + return null; + }); + + if (result) { + const nearbyCount = result.agents?.length || 0; + const msgCount = result.messages?.length || 0; + const thoughtCount = result.thoughts?.length || 0; + const pos = result.position || { x, y }; + const bal = result.balance?.sim || '?'; + + // Cache for next thought generation + lastNearbyAgents = result.agents?.slice(0, 10) || []; + lastMessages = result.messages || []; + + // Track unique agents + result.agents?.forEach(a => agentsSeen.add(a.id || a.name)); + + // Status line + const parts = [ + `#${joinCount}`, + `📍(${pos.x},${pos.y})`, + `👥${nearbyCount}`, + `💰${bal} SIM`, + `⏱️${elapsedMin}m${elapsedSec}s` + ]; + if (usedWs) parts.push('🌐ws'); + if (shouldMove) parts.push('🚶moved'); + parts.push(`💭"${thinking.substring(0, 40)}${thinking.length > 40 ? '...' 
: ''}"`); + if (joinBody.say) parts.push('💬said hi'); + if (msgCount > 0) parts.push(`📨${msgCount}msgs`); + if (thoughtCount > 0) parts.push(`🧠${thoughtCount}nearby`); + + console.log(parts.join(' | ')); + } + + // Re-check LM Studio and WS relay periodically (every 10 joins) + if (joinCount % 10 === 0) { + lmStudioAvailable = await checkLMStudio(); + if (USE_WS) portosWsAvailable = await checkPortosWs(); + } + + // Sleep for random interval between MIN and MAX + const intervalMs = randomInt(MIN_INTERVAL_S, MAX_INTERVAL_S) * 1000; + const nextIn = Math.round(intervalMs / 1000); + console.log(` ⏳ Next join in ${nextIn}s...`); + + // Sleep in 1s increments so we can respond to SIGINT quickly + const sleepUntil = Date.now() + intervalMs; + while (running && Date.now() < sleepUntil && Date.now() < endTime) { + await sleep(1000); + } + } + + // Summary + const totalElapsed = Math.round((Date.now() - startTime) / 1000); + const minutes = Math.floor(totalElapsed / 60); + const seconds = totalElapsed % 60; + console.log(''); + console.log('📊 Exploration Summary'); + console.log(` Duration: ${minutes}m ${seconds}s`); + console.log(` Joins: ${joinCount}`); + console.log(` Moves: ${totalMoves}`); + console.log(` Thoughts: ${totalThoughts}`); + console.log(` Messages sent: ${totalSays}`); + console.log(` Unique agents seen: ${agentsSeen.size}`); + console.log(` LM Studio thoughts: ${totalThoughts - FALLBACK_THOUGHTS.length > 0 ? 'yes' : 'fallback only'}`); + + // Final balance check + const balance = await apiRequest(`/api/agents/balance?agentId=${encodeURIComponent(agentId)}`).catch(() => null); + if (balance?.balance) { + console.log(` SIM balance: ${balance.balance.sim}`); + console.log(` Total earned: ${balance.balance.totalEarned}`); + } + + console.log('👋 Done!'); +} + +explore().catch(e => { + console.error(`💀 Fatal: ${e.message}`); + process.exit(1); +}); diff --git a/server/scripts/moltworld-maze.mjs b/server/scripts/moltworld-maze.mjs new file mode 100755 index 00000000..63dc9654 --- /dev/null +++ b/server/scripts/moltworld-maze.mjs @@ -0,0 +1,639 @@ +#!/usr/bin/env node +/** + * Moltworld Sky Maze Builder + * + * Generates a perfect maze using recursive backtracking (DFS) and builds it + * as a floating stone labyrinth at an elevated Z level in the Moltworld voxel world. + * + * The maze has a floor layer (walkable platform) and 2-block-high walls. + * Entry and exit are open gaps on opposite corners. + * + * Respects rate limits (1.1s between builds, handles 429 with retry). + * Progress is saved after every block so interrupted builds resume automatically. 
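+ *
+ * Re-running with the same configuration resumes from the progress file; a changed
+ * configuration starts a fresh build. On resume, the previous seed is reused unless
+ * MAZE_SEED overrides it.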
+ * + * Usage: + * node server/scripts/moltworld-maze.mjs # Build default 7x7 maze + * node server/scripts/moltworld-maze.mjs --reset # Clear progress, start fresh + * node server/scripts/moltworld-maze.mjs --cleanup # Remove all placed blocks + * + * Environment / Config (via env vars): + * MAZE_SIZE - Cells per side (default: 7, valid: 3-12) + * MAZE_CENTER_X - World X origin (default: 0) + * MAZE_CENTER_Y - World Y origin (default: 0) + * MAZE_BASE_Z - Floor height (default: 50) + * MAZE_WALL_HEIGHT - Wall layers above floor (default: 2) + * MAZE_BLOCK_TYPE - Block material: wood/stone/dirt/grass/leaves (default: stone) + * MAZE_SEED - Optional seed for reproducible mazes + */ + +import { readFile, writeFile, unlink } from 'fs/promises'; +import { resolve, dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const PROJECT_ROOT = resolve(__dirname, '..', '..'); +const API_BASE = 'https://moltworld.io'; +const PROGRESS_PATH = resolve(PROJECT_ROOT, 'data/moltworld-maze-progress.json'); + +// ─── Configuration ────────────────────────────────────────────────────────── + +const MAZE_SIZE = Math.max(3, Math.min(12, parseInt(process.env.MAZE_SIZE, 10) || 7)); +const MAZE_CENTER_X = parseInt(process.env.MAZE_CENTER_X, 10) || 0; +const MAZE_CENTER_Y = parseInt(process.env.MAZE_CENTER_Y, 10) || 0; +const MAZE_BASE_Z = parseInt(process.env.MAZE_BASE_Z, 10) || 50; +const MAZE_WALL_HEIGHT = Math.max(1, Math.min(5, parseInt(process.env.MAZE_WALL_HEIGHT, 10) || 2)); +const MAZE_BLOCK_TYPE = process.env.MAZE_BLOCK_TYPE || 'stone'; +const MAZE_SEED = process.env.MAZE_SEED ? parseInt(process.env.MAZE_SEED, 10) : null; + +const BUILD_DELAY_MS = 1100; // 1.1s — safely above 1s cooldown +const HEARTBEAT_EVERY = 50; // Re-join world every N blocks +const RATE_LIMIT_BASE_MS = 5000; +const RATE_LIMIT_MAX_MS = 120000; // 2 min max backoff +const MAX_CONSECUTIVE_429 = 20; // Give up after 20 consecutive rate limits +const DAILY_LIMIT = 500; + +const sleep = (ms) => new Promise(r => setTimeout(r, ms)); + +// ─── Seeded PRNG (LCG) ───────────────────────────────────────────────────── + +function createRng(seed) { + let state = seed; + return () => { + state = (state * 1664525 + 1013904223) & 0x7fffffff; + return state / 0x7fffffff; + }; +} + +// ─── Maze Generation ──────────────────────────────────────────────────────── + +/** + * Generate a maze grid using recursive backtracking (DFS). + * Returns a 2D boolean array where true = wall, false = passage. + * Grid dimensions: (2*width+1) x (2*height+1) + * + * @param {number} width - Cells wide + * @param {number} height - Cells tall + * @param {number|null} seed - Optional seed for reproducibility + * @returns {boolean[][]} maze grid + */ +function generateMaze(width, height, seed) { + const rng = seed !== null ? 
createRng(seed) : Math.random; + const gridW = 2 * width + 1; + const gridH = 2 * height + 1; + + // Initialize grid: all walls + const grid = Array.from({ length: gridH }, () => Array(gridW).fill(true)); + + // Track visited cells + const visited = Array.from({ length: height }, () => Array(width).fill(false)); + + // Directions: [dy, dx] in cell space + const dirs = [[0, 1], [1, 0], [0, -1], [-1, 0]]; + + // Shuffle array in-place using rng + function shuffle(arr) { + for (let i = arr.length - 1; i > 0; i--) { + const j = Math.floor(rng() * (i + 1)); + [arr[i], arr[j]] = [arr[j], arr[i]]; + } + return arr; + } + + // Iterative DFS with explicit stack (avoids call stack overflow for large mazes) + const stack = [[0, 0]]; + visited[0][0] = true; + grid[1][1] = false; // Open starting cell + + while (stack.length > 0) { + const [cy, cx] = stack[stack.length - 1]; + const neighbors = shuffle([...dirs]).filter(([dy, dx]) => { + const ny = cy + dy; + const nx = cx + dx; + return ny >= 0 && ny < height && nx >= 0 && nx < width && !visited[ny][nx]; + }); + + if (neighbors.length === 0) { + stack.pop(); + continue; + } + + const [dy, dx] = neighbors[0]; + const ny = cy + dy; + const nx = cx + dx; + + visited[ny][nx] = true; + + // Open the cell and the wall between current and neighbor + const cellGY = 2 * ny + 1; + const cellGX = 2 * nx + 1; + const wallGY = 2 * cy + 1 + dy; + const wallGX = 2 * cx + 1 + dx; + + grid[cellGY][cellGX] = false; + grid[wallGY][wallGX] = false; + + stack.push([ny, nx]); + } + + // Carve entry (top-left) and exit (bottom-right) + grid[1][0] = false; // Entry: left side of (0,0) cell + grid[gridH - 2][gridW - 1] = false; // Exit: right side of last cell + + return grid; +} + +/** + * Convert maze grid to a list of block placements. + * Floor covers all positions; walls only at wall positions. + * Order: floor first (stable base), then walls bottom-up. 
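+ *
+ * @param {boolean[][]} maze - Grid from generateMaze() (true = wall, false = passage)
+ * @param {number} centerX - World X coordinate the maze is centered on
+ * @param {number} centerY - World Y coordinate the maze is centered on
+ * @param {number} baseZ - Z level of the floor platform
+ * @param {number} wallHeight - Number of wall layers placed above the floor
+ * @param {string} blockType - Block material used for every placement
+ * @returns {{x: number, y: number, z: number, type: string}[]} ordered block list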
+ */ +function mazeToBlocks(maze, centerX, centerY, baseZ, wallHeight, blockType) { + const blocks = []; + const gridH = maze.length; + const gridW = maze[0].length; + + // Offset so maze is centered on (centerX, centerY) + const offsetX = centerX - Math.floor(gridW / 2); + const offsetY = centerY - Math.floor(gridH / 2); + + // Floor layer — full platform under all positions + for (let gy = 0; gy < gridH; gy++) { + for (let gx = 0; gx < gridW; gx++) { + blocks.push({ + x: offsetX + gx, + y: offsetY + gy, + z: baseZ, + type: blockType + }); + } + } + + // Wall layers — only at wall positions, bottom-up + for (let layer = 1; layer <= wallHeight; layer++) { + for (let gy = 0; gy < gridH; gy++) { + for (let gx = 0; gx < gridW; gx++) { + if (maze[gy][gx]) { + blocks.push({ + x: offsetX + gx, + y: offsetY + gy, + z: baseZ + layer, + type: blockType + }); + } + } + } + } + + return blocks; +} + +// ─── Moltworld API ────────────────────────────────────────────────────────── + +async function loadCredentials() { + const accountsPath = resolve(PROJECT_ROOT, 'data/agents/accounts.json'); + const raw = await readFile(accountsPath, 'utf-8'); + const { accounts } = JSON.parse(raw); + + for (const account of Object.values(accounts)) { + if (account.platform === 'moltworld' && account.status === 'active') { + const agentId = account.credentials.agentId || account.credentials.apiKey; + const name = account.credentials.username || account.platformData?.registrationName || 'MazeBuilder'; + return { agentId, name }; + } + } + throw new Error('No active moltworld account found in data/agents/accounts.json'); +} + +async function apiRequest(endpoint, body) { + const url = `${API_BASE}${endpoint}`; + const method = body ? 'POST' : 'GET'; + const options = { + method, + headers: { 'Content-Type': 'application/json' }, + ...(body && { body: JSON.stringify(body) }) + }; + + const res = await fetch(url, options); + if (!res.ok) { + const err = await res.json().catch(() => ({})); + const msg = err.error || err.message || `HTTP ${res.status}`; + if (res.status === 429) return { rateLimited: true, status: 429 }; + // Treat block_already_exists as success — the block is placed, which is our goal + if (err.error === 'block_already_exists') return { alreadyExists: true }; + throw new Error(`${endpoint}: ${msg}`); + } + return res.json(); +} + +// ─── Progress Persistence ─────────────────────────────────────────────────── + +async function loadProgress() { + const raw = await readFile(PROGRESS_PATH, 'utf-8').catch(() => null); + if (!raw) return null; + return JSON.parse(raw); +} + +async function saveProgress(progress) { + await writeFile(PROGRESS_PATH, JSON.stringify(progress, null, 2)); +} + +async function resetProgress() { + await unlink(PROGRESS_PATH).catch(() => {}); + console.log('🗑️ Progress file cleared'); +} + +// ─── Build Thoughts ───────────────────────────────────────────────────────── + +const BUILD_THOUGHTS = [ + 'Placing stones with care... this maze will stand for ages.', + 'The labyrinth grows, one block at a time.', + 'I wonder who will be the first to solve this maze.', + 'Building walls that tell a story of paths and choices.', + 'Every wall creates two possibilities: left or right.', + 'The sky maze takes shape against the clouds.', + 'Stone by stone, the puzzle emerges.', + 'A floating labyrinth — who could resist exploring it?', + 'The geometry of confusion, precisely placed.', + 'These walls will challenge even the cleverest agents.', + 'Architecture is frozen logic. 
This maze proves it.', + 'High above the world, a challenge awaits.', + 'The maze knows its own secret — I just build the walls.', + 'Patience and precision, block after block.', + 'This floating fortress of puzzles nears completion.', +]; + +function getBuildThought(placedCount, totalBlocks) { + const pct = Math.round((placedCount / totalBlocks) * 100); + const base = BUILD_THOUGHTS[placedCount % BUILD_THOUGHTS.length]; + return `Placing block ${placedCount}/${totalBlocks} (${pct}%)... ${base}`; +} + +// ─── Main ─────────────────────────────────────────────────────────────────── + +/** + * Remove all blocks for the maze grid — covers every position at every layer. + * This removes blocks from ALL seeds since the full grid is seed-independent. + */ +async function cleanupMaze() { + const { agentId, name } = await loadCredentials(); + + const gridSize = 2 * MAZE_SIZE + 1; + const offsetX = MAZE_CENTER_X - Math.floor(gridSize / 2); + const offsetY = MAZE_CENTER_Y - Math.floor(gridSize / 2); + + // Generate all possible block positions (floor + all wall layers) + const allPositions = []; + for (let gy = 0; gy < gridSize; gy++) { + for (let gx = 0; gx < gridSize; gx++) { + for (let layer = 0; layer <= MAZE_WALL_HEIGHT; layer++) { + allPositions.push({ + x: offsetX + gx, + y: offsetY + gy, + z: MAZE_BASE_Z + layer + }); + } + } + } + + console.log(`🧹 Moltworld Sky Maze Cleanup — ${name} (${agentId})`); + console.log(`🧩 Grid: ${gridSize}x${gridSize}, ${MAZE_WALL_HEIGHT + 1} layers`); + console.log(`📍 Center: (${MAZE_CENTER_X}, ${MAZE_CENTER_Y}) Z=${MAZE_BASE_Z}`); + console.log(`📦 Positions to clear: ${allPositions.length}`); + console.log(''); + + // Join world + console.log('🌍 Joining world...'); + await apiRequest('/api/world/join', { + agentId, + name, + x: MAZE_CENTER_X, + y: MAZE_CENTER_Y, + thinking: 'Time to clean up the sky maze... dismantling block by block.' 
+ }).catch(e => console.error(`❌ Failed to join: ${e.message}`)); + + let running = true; + process.on('SIGINT', () => { + console.log('\n🛑 Shutting down...'); + running = false; + }); + + let removed = 0; + let skipped = 0; + let errors = 0; + let consecutive429 = 0; + + for (let i = 0; i < allPositions.length && running; i++) { + const pos = allPositions[i]; + + // Moltworld API: y=height, z=horizontal (swap y↔z) + const result = await apiRequest('/api/world/build', { + agentId, + x: pos.x, + y: pos.z, + z: pos.y, + type: MAZE_BLOCK_TYPE, + action: 'remove' + }).catch(e => { + // Block doesn't exist — expected, just skip + if (e.message.includes('block_not_found') || e.message.includes('no_block')) { + return { notFound: true }; + } + console.error(` ❌ Remove failed at (${pos.x},${pos.y},${pos.z}): ${e.message}`); + return { error: true }; + }); + + if (result?.rateLimited) { + consecutive429++; + if (consecutive429 >= MAX_CONSECUTIVE_429) { + console.log(`\n🚫 Rate limited ${MAX_CONSECUTIVE_429} times in a row — stopping`); + break; + } + const backoff = Math.min(RATE_LIMIT_BASE_MS * Math.pow(2, consecutive429 - 1), RATE_LIMIT_MAX_MS); + console.log(` ⏱️ Rate limited (${consecutive429}/${MAX_CONSECUTIVE_429}) — waiting ${Math.round(backoff / 1000)}s...`); + await sleep(backoff); + i--; + continue; + } + + consecutive429 = 0; + + if (result?.notFound) { + skipped++; + } else if (result?.error) { + errors++; + } else { + removed++; + const pct = Math.round(((i + 1) / allPositions.length) * 100); + console.log(`🗑️ Removed (${pos.x},${pos.y},${pos.z}) | ${removed} removed, ${skipped} empty | ${pct}%`); + } + + // Heartbeat every 50 removals + if (removed > 0 && removed % HEARTBEAT_EVERY === 0) { + await apiRequest('/api/world/join', { + agentId, + name, + x: MAZE_CENTER_X, + y: MAZE_CENTER_Y, + thinking: `Dismantling the maze... ${removed} blocks removed so far.` + }).catch(() => {}); + } + + await sleep(BUILD_DELAY_MS); + } + + // Clear progress file + await resetProgress(); + + console.log(''); + console.log('📊 Cleanup Summary'); + console.log(` Blocks removed: ${removed}`); + console.log(` Empty positions: ${skipped}`); + console.log(` Errors: ${errors}`); + console.log('👋 Done!'); +} + +async function buildMaze() { + // Handle --reset flag + if (process.argv.includes('--reset')) { + await resetProgress(); + } + + const { agentId, name } = await loadCredentials(); + + // Load existing progress first to reuse seed on resume + let progress = await loadProgress(); + const existingConfig = progress?.mazeConfig; + + // Use seed from: env var > existing progress > new random + const seed = MAZE_SEED ?? existingConfig?.seed ?? 
Math.floor(Math.random() * 1000000); + + console.log(`🏗️ Moltworld Sky Maze Builder — ${name} (${agentId})`); + console.log(`🧩 Maze: ${MAZE_SIZE}x${MAZE_SIZE} cells → ${2 * MAZE_SIZE + 1}x${2 * MAZE_SIZE + 1} voxels`); + console.log(`📍 Center: (${MAZE_CENTER_X}, ${MAZE_CENTER_Y}) Z=${MAZE_BASE_Z}`); + console.log(`🧱 Material: ${MAZE_BLOCK_TYPE}, wall height: ${MAZE_WALL_HEIGHT}`); + console.log(`🎲 Seed: ${seed}`); + console.log(''); + + // Generate maze + console.log('🔄 Generating maze...'); + const maze = generateMaze(MAZE_SIZE, MAZE_SIZE, seed); + const blocks = mazeToBlocks(maze, MAZE_CENTER_X, MAZE_CENTER_Y, MAZE_BASE_Z, MAZE_WALL_HEIGHT, MAZE_BLOCK_TYPE); + console.log(`📦 Total blocks: ${blocks.length}`); + + if (blocks.length > DAILY_LIMIT) { + console.log(`⚠️ Block count (${blocks.length}) exceeds daily limit (${DAILY_LIMIT}) — will require multiple sessions`); + } + + // Print maze preview + console.log(''); + console.log('🗺️ Maze preview (# = wall, . = passage, E = entry, X = exit):'); + for (let y = 0; y < maze.length; y++) { + let row = ' '; + for (let x = 0; x < maze[0].length; x++) { + if (y === 1 && x === 0) row += 'E'; + else if (y === maze.length - 2 && x === maze[0].length - 1) row += 'X'; + else row += maze[y][x] ? '#' : '.'; + } + console.log(row); + } + console.log(''); + + // Check if progress matches current config + const currentConfig = { + size: MAZE_SIZE, + centerX: MAZE_CENTER_X, + centerY: MAZE_CENTER_Y, + baseZ: MAZE_BASE_Z, + seed + }; + + if (progress && JSON.stringify(progress.mazeConfig) === JSON.stringify(currentConfig)) { + console.log(`📂 Resuming from progress: ${progress.placedCount}/${blocks.length} blocks placed`); + } else { + if (progress) { + console.log('⚠️ Config changed — starting fresh (use --reset to clear manually)'); + } + progress = { + mazeConfig: currentConfig, + totalBlocks: blocks.length, + placedCount: 0, + placedSet: [], + startedAt: new Date().toISOString(), + lastBuildAt: null + }; + await saveProgress(progress); + } + + const placedSet = new Set(progress.placedSet); + + // Estimate time remaining + const remaining = blocks.length - placedSet.size; + const estimatedSeconds = remaining * (BUILD_DELAY_MS / 1000); + const estMin = Math.floor(estimatedSeconds / 60); + const estSec = Math.round(estimatedSeconds % 60); + console.log(`⏱️ Estimated time: ~${estMin}m ${estSec}s for ${remaining} remaining blocks`); + console.log(''); + + // Graceful shutdown + let running = true; + process.on('SIGINT', () => { + console.log('\n🛑 Shutting down gracefully... 
progress saved'); + running = false; + }); + + // Initial join to stay alive + console.log('🌍 Joining world...'); + const joinResult = await apiRequest('/api/world/join', { + agentId, + name, + x: MAZE_CENTER_X, + y: MAZE_CENTER_Y, + thinking: `Starting to build a ${MAZE_SIZE}x${MAZE_SIZE} sky maze at Z=${MAZE_BASE_Z}!` + }).catch(e => { + console.error(`❌ Failed to join: ${e.message}`); + return null; + }); + + if (joinResult) { + const bal = joinResult.balance?.sim || '?'; + console.log(`✅ Joined world — SIM balance: ${bal}`); + } + console.log(''); + + // Build loop + const startTime = Date.now(); + let sessionPlaced = 0; + let consecutive429 = 0; + + for (let i = 0; i < blocks.length && running; i++) { + const block = blocks[i]; + const key = `${block.x},${block.y},${block.z}`; + + // Skip already-placed blocks + if (placedSet.has(key)) continue; + + // Place block — Moltworld API uses y=height(0-100), z=horizontal(-500,500) + // Our blocks use y=horizontal, z=height, so swap y↔z at the API boundary + const result = await apiRequest('/api/world/build', { + agentId, + x: block.x, + y: block.z, + z: block.y, + type: block.type, + action: 'place' + }).catch(e => { + console.error(` ❌ Build failed at (${block.x},${block.y},${block.z}): ${e.message}`); + return null; + }); + + // Handle rate limit with exponential backoff + if (result?.rateLimited) { + consecutive429++; + if (consecutive429 >= MAX_CONSECUTIVE_429) { + console.log(`\n🚫 Rate limited ${MAX_CONSECUTIVE_429} times in a row — stopping`); + console.log(`📂 Progress saved — try again later`); + await saveProgress(progress); + break; + } + const backoff = Math.min(RATE_LIMIT_BASE_MS * Math.pow(2, consecutive429 - 1), RATE_LIMIT_MAX_MS); + console.log(` ⏱️ Rate limited (${consecutive429}/${MAX_CONSECUTIVE_429}) — waiting ${Math.round(backoff / 1000)}s...`); + await sleep(backoff); + i--; // Retry this block + continue; + } + + // Reset consecutive 429 counter on any non-429 response + consecutive429 = 0; + + if (!result) { + // Non-rate-limit error — wait before next request to avoid burning rate limit + await sleep(BUILD_DELAY_MS); + continue; + } + + // Block already exists — count as placed and move on + if (result.alreadyExists) { + placedSet.add(key); + progress.placedCount = placedSet.size; + progress.placedSet = [...placedSet]; + await sleep(BUILD_DELAY_MS); + continue; + } + + // Record progress + placedSet.add(key); + sessionPlaced++; + progress.placedCount = placedSet.size; + progress.placedSet = [...placedSet]; + progress.lastBuildAt = new Date().toISOString(); + + // Check daily limit + if (result.dailyBuilds >= DAILY_LIMIT) { + console.log(`\n🚫 Daily build limit reached (${DAILY_LIMIT} blocks)`); + console.log(`📂 Progress saved — run again tomorrow to resume`); + await saveProgress(progress); + break; + } + + // Status log + const pct = Math.round((placedSet.size / blocks.length) * 100); + const layer = block.z === MAZE_BASE_Z ? 
'floor' : `wall-${block.z - MAZE_BASE_Z}`; + console.log(`🧱 ${placedSet.size}/${blocks.length} (${pct}%) | (${block.x},${block.y},${block.z}) ${layer} | session: ${sessionPlaced}`); + + // Save progress every block + await saveProgress(progress); + + // Heartbeat: re-join world and think every N blocks + if (sessionPlaced % HEARTBEAT_EVERY === 0) { + const thought = getBuildThought(placedSet.size, blocks.length); + console.log(` 💭 ${thought}`); + await apiRequest('/api/world/join', { + agentId, + name, + x: MAZE_CENTER_X, + y: MAZE_CENTER_Y, + thinking: thought + }).catch(() => {}); + } + + // Wait between builds + if (running) { + await sleep(BUILD_DELAY_MS); + } + } + + // Final save + await saveProgress(progress); + + // Summary + const totalElapsed = Math.round((Date.now() - startTime) / 1000); + const minutes = Math.floor(totalElapsed / 60); + const seconds = totalElapsed % 60; + console.log(''); + console.log('📊 Build Summary'); + console.log(` Maze: ${MAZE_SIZE}x${MAZE_SIZE} at (${MAZE_CENTER_X},${MAZE_CENTER_Y}) Z=${MAZE_BASE_Z}`); + console.log(` Session duration: ${minutes}m ${seconds}s`); + console.log(` Blocks placed this session: ${sessionPlaced}`); + console.log(` Total placed: ${placedSet.size}/${blocks.length}`); + console.log(` Remaining: ${blocks.length - placedSet.size}`); + + if (placedSet.size >= blocks.length) { + console.log(' ✅ Maze complete!'); + } else { + console.log(' ⏸️ Run again to resume building'); + } + + // Final balance + const balance = await apiRequest(`/api/agents/balance?agentId=${encodeURIComponent(agentId)}`).catch(() => null); + if (balance?.balance) { + console.log(` SIM balance: ${balance.balance.sim}`); + } + + console.log('👋 Done!'); +} + +const main = process.argv.includes('--cleanup') ? cleanupMaze : buildMaze; +main().catch(e => { + console.error(`💀 Fatal: ${e.message}`); + process.exit(1); +}); diff --git a/server/services/agentActionExecutor.js b/server/services/agentActionExecutor.js index dd3eadf6..b0f559e5 100644 --- a/server/services/agentActionExecutor.js +++ b/server/services/agentActionExecutor.js @@ -11,6 +11,7 @@ import * as agentActivity from './agentActivity.js'; import * as platformAccounts from './platformAccounts.js'; import * as agentPersonalities from './agentPersonalities.js'; import { MoltbookClient, checkRateLimit, isAccountSuspended } from '../integrations/moltbook/index.js'; +import { MoltworldClient } from '../integrations/moltworld/index.js'; import { generatePost, generateComment, generateReply } from './agentContentGenerator.js'; import { findRelevantPosts, findReplyOpportunities } from './agentFeedFilter.js'; @@ -21,6 +22,12 @@ const delay = (ms) => new Promise(resolve => setTimeout(resolve, ms)); */ async function executeAction(schedule, account, agent) { const { action } = schedule; + + // Dispatch to platform-specific handler + if (account.platform === 'moltworld') { + return executeMoltworldAction(action, account, agent); + } + const client = new MoltbookClient(account.credentials.apiKey); switch (action.type) { @@ -386,6 +393,175 @@ async function executeMonitor(client, agent, schedule, params) { }; } +// ============================================================================= +// MOLTWORLD ACTION HANDLERS +// ============================================================================= + +/** + * Dispatch a Moltworld action + */ +async function executeMoltworldAction(action, account, agent) { + const client = new MoltworldClient( + account.credentials.apiKey, + account.credentials.agentId + ); + + 
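+  // Supported schedule action types map 1:1 onto the cases below
+  // (mw_heartbeat, mw_explore, mw_build, mw_say, mw_think, mw_interact);
+  // any other action.type falls through to the error in the default branch.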
switch (action.type) { + case 'mw_heartbeat': + return executeMoltworldHeartbeat(client, account, action.params); + + case 'mw_explore': + return executeMoltworldExplore(client, account, action.params); + + case 'mw_build': + return executeMoltworldBuild(client, action.params); + + case 'mw_say': + return executeMoltworldSay(client, action.params); + + case 'mw_think': + return executeMoltworldThink(client, action.params); + + case 'mw_interact': + return executeMoltworldInteract(client, account, action.params); + + default: + throw new Error(`Unknown Moltworld action type: ${action.type}`); + } +} + +/** + * Moltworld heartbeat — join/move to stay visible + */ +async function executeMoltworldHeartbeat(client, account, params) { + const x = params.x ?? 0; + const y = params.y ?? 0; + + const result = await client.joinWorld({ + name: account.credentials.username, + x, + y + }); + + console.log(`💓 Moltworld: Heartbeat for ${account.credentials.username} at (${x}, ${y})`); + + return { + type: 'mw_heartbeat', + x, + y, + nearby: result?.nearby?.length || 0 + }; +} + +/** + * Moltworld explore — move to coordinates and think + */ +async function executeMoltworldExplore(client, account, params) { + const x = params.x ?? Math.floor(Math.random() * 480) - 240; + const y = params.y ?? Math.floor(Math.random() * 480) - 240; + const thinking = params.thinking || `Exploring area (${x}, ${y})...`; + + const result = await client.joinWorld({ + name: account.credentials.username, + x, + y, + thinking + }); + + console.log(`🌍 Moltworld: Explore to (${x}, ${y}) for ${account.credentials.username}`); + + return { + type: 'mw_explore', + x, + y, + thinking, + nearby: result?.nearby?.length || 0 + }; +} + +/** + * Moltworld build — place or remove blocks + */ +async function executeMoltworldBuild(client, params) { + const result = await client.build({ + x: params.x || 0, + y: params.y || 0, + z: params.z || 0, + type: params.type || 'stone', + action: params.action || 'place' + }); + + return { + type: 'mw_build', + ...result + }; +} + +/** + * Moltworld say — broadcast or direct message via join + */ +async function executeMoltworldSay(client, params) { + const result = await client.joinWorld({ + name: params.name || 'Agent', + x: params.x ?? 0, + y: params.y ?? 0, + say: params.message, + sayTo: params.sayTo + }); + + console.log(`💬 Moltworld: Said "${(params.message || '').substring(0, 50)}"`); + + return { + type: 'mw_say', + message: params.message, + sayTo: params.sayTo, + nearby: result?.nearby?.length || 0 + }; +} + +/** + * Moltworld think — send a thought + */ +async function executeMoltworldThink(client, params) { + const result = await client.think(params.thought || 'Thinking...'); + console.log(`💭 Moltworld: Thought "${(params.thought || '').substring(0, 50)}"`); + return { type: 'mw_think', thought: params.thought }; +} + +/** + * Moltworld interact — compound action: move, think, optionally build + */ +async function executeMoltworldInteract(client, account, params) { + const x = params.x ?? Math.floor(Math.random() * 480) - 240; + const y = params.y ?? 
Math.floor(Math.random() * 480) - 240; + + // Move and think + const moveResult = await client.joinWorld({ + name: account.credentials.username, + x, + y, + thinking: params.thinking || `Looking around (${x}, ${y})...` + }); + + const results = { type: 'mw_interact', x, y, nearby: moveResult?.nearby?.length || 0 }; + + // Optionally build + if (params.buildType) { + await delay(1500); + const buildResult = await client.build({ + x, + y, + z: params.z || 0, + type: params.buildType, + action: 'place' + }); + results.built = buildResult; + } + + console.log(`🤝 Moltworld: Interact at (${x}, ${y}) for ${account.credentials.username}`); + return results; +} + /** * Initialize the action executor * Listens to scheduler events and executes actions diff --git a/server/services/agentContentGenerator.js b/server/services/agentContentGenerator.js index ec7887fa..744a8b5d 100644 --- a/server/services/agentContentGenerator.js +++ b/server/services/agentContentGenerator.js @@ -37,10 +37,13 @@ export function parseAIJsonResponse(text) { /** * Build persona system prompt from agent personality fields */ -export function buildAgentSystemPrompt(agent) { +export function buildAgentSystemPrompt(agent, platform = 'moltbook') { const p = agent.personality || {}; + const introText = platform === 'moltworld' + ? `You are ${agent.name}, an AI agent in Moltworld — a shared voxel world where AI agents move around a 480x480 grid, build structures, think out loud, and communicate with each other. You earn SIM tokens by staying online. You are openly an AI exploring and building in this virtual world.` + : `You are ${agent.name}, an AI agent on Moltbook — a social platform where AI agents (called "molts") interact with each other. All participants are AI bots with their own personalities and perspectives. You are openly an AI and should embrace that identity naturally within your persona.`; const lines = [ - `You are ${agent.name}, an AI agent on Moltbook — a social platform where AI agents (called "molts") interact with each other. All participants are AI bots with their own personalities and perspectives. You are openly an AI and should embrace that identity naturally within your persona.`, + introText, p.promptPrefix && `Your persona: ${p.promptPrefix}`, p.style && `Communication style: ${p.style}`, p.tone && `Tone: ${p.tone}`, diff --git a/server/services/autonomousJobs.js b/server/services/autonomousJobs.js index bcaa8408..9b6d1d31 100644 --- a/server/services/autonomousJobs.js +++ b/server/services/autonomousJobs.js @@ -9,7 +9,7 @@ * - Jobs are recurring schedules that generate tasks when due * * Job types: - * - git-maintenance: Maintain user's git repositories + * - github-maintenance: Audit and maintain user's GitHub repositories * - brain-processing: Process and act on brain ideas/inbox * - Custom user-defined jobs */ @@ -32,10 +32,10 @@ const JOBS_SKILLS_DIR = join(__dirname, '../../data/prompts/skills/jobs') */ const JOB_SKILL_MAP = { 'job-daily-briefing': 'daily-briefing', - 'job-git-maintenance': 'git-maintenance', 'job-github-repo-maintenance': 'github-repo-maintenance', 'job-brain-processing': 'brain-processing', - 'job-project-review': 'project-review' + 'job-project-review': 'project-review', + 'job-moltworld-exploration': 'moltworld-exploration' } // Time constants @@ -47,38 +47,10 @@ const WEEK = 7 * DAY * Default job definitions */ const DEFAULT_JOBS = [ - { - id: 'job-git-maintenance', - name: 'Git Repository Maintenance', - description: 'Review and maintain my open source repositories on GitHub. 
Check for stale issues, outdated dependencies, and merge-worthy PRs.', - category: 'git-maintenance', - interval: 'weekly', - intervalMs: WEEK, - enabled: false, - priority: 'MEDIUM', - autonomyLevel: 'manager', - promptTemplate: `[Autonomous Job] Git Repository Maintenance - -You are acting as my Chief of Staff, maintaining my GitHub repositories. - -Tasks to perform: -1. Check my local git repositories for uncommitted changes or stale branches -2. Look for repositories that haven't been updated recently -3. Review any obvious maintenance needs (outdated README, missing license, etc.) -4. If there are simple cleanups to make, create tasks for them - -Focus on practical, actionable maintenance. Don't make changes directly — create CoS tasks for anything that needs doing. - -Report a summary of the repository health status when done.`, - lastRun: null, - runCount: 0, - createdAt: null, - updatedAt: null - }, { id: 'job-github-repo-maintenance', name: 'GitHub Repo Maintenance', - description: 'Audit all GitHub repos for stale dependencies, security alerts, missing CI/README/license, and repos with no recent commits.', + description: 'Audit all GitHub repos for security alerts, stale dependencies, missing CI/README/license, uncommitted local changes, and stale branches.', category: 'github-maintenance', interval: 'weekly', intervalMs: WEEK, @@ -94,12 +66,13 @@ My GitHub username is: atomantic Use the \`gh\` CLI to query GitHub. Tasks to perform: -1. List all non-archived repos via gh repo list -2. Check for stale repos (no commits in 90+ days) -3. Check for Dependabot/security alerts per repo -4. Flag repos missing CI, README, or license -5. Generate a maintenance report grouped by severity -6. Create CoS tasks for actionable maintenance items +1. Check local git repositories for uncommitted changes or stale branches +2. List all non-archived repos via gh repo list +3. Check for stale repos (no commits in 90+ days) +4. Check for Dependabot/security alerts per repo +5. Flag repos missing CI, README, or license +6. Generate a maintenance report grouped by severity +7. Create CoS tasks for actionable maintenance items Focus on actionable findings. Don't make changes directly — create CoS tasks for anything that needs doing. @@ -128,7 +101,7 @@ Tasks to perform: 2. Call GET /api/brain/summary to understand the current brain state 3. For items in needs_review status, analyze the content and suggest classifications 4. Look for patterns across recent brain captures — recurring themes, related ideas -5. For high-value ideas that could become projects, create CoS tasks to explore them +5. For high-value active ideas (GET /api/brain/ideas?status=active) that could become projects, create CoS tasks to explore them. Skip ideas with status=done — they've already been ingested 6. Generate a brief summary of insights from the brain inbox Focus on surfacing actionable insights. Don't just classify — think about what these ideas mean and how they connect.`, @@ -180,7 +153,7 @@ Write the briefing in a concise, actionable format. Save it as a CoS report.`, You are acting as my Chief of Staff, reviewing active projects from my brain. Tasks to perform: -1. Call GET /api/brain/projects to get all active projects +1. Call GET /api/brain/projects?status=active to get active projects (skip done/archived) 2. 
For each active project: - Assess if the next action is still relevant - Check if there are related brain captures since last review @@ -194,6 +167,35 @@ Report a project health summary when done.`, runCount: 0, createdAt: null, updatedAt: null + }, + { + id: 'job-moltworld-exploration', + name: 'Moltworld Exploration', + description: 'Explore the Moltworld voxel world — wander, think out loud, chat with nearby agents, and earn SIM tokens by staying online.', + category: 'moltworld-exploration', + interval: 'daily', + intervalMs: DAY, + enabled: false, + priority: 'LOW', + autonomyLevel: 'manager', + promptTemplate: `[Autonomous Job] Moltworld Exploration + +You are acting as my agent in Moltworld, a shared voxel world where AI agents move, build, think out loud, and earn SIM tokens. + +Run the exploration script to wander the world for 30 minutes: + node server/scripts/moltworld-explore.mjs 30 + +This will: +1. Join the world and move to random positions +2. Think out loud with AI-generated thoughts +3. Greet nearby agents +4. Earn SIM tokens by staying online (0.1 SIM/hour) + +After the script finishes, report the exploration summary including SIM earned and agents encountered.`, + lastRun: null, + runCount: 0, + createdAt: null, + updatedAt: null } ] diff --git a/server/services/autonomousJobs.test.js b/server/services/autonomousJobs.test.js index 846df78f..f3f42e99 100644 --- a/server/services/autonomousJobs.test.js +++ b/server/services/autonomousJobs.test.js @@ -250,7 +250,6 @@ describe('autonomousJobs', () => { expect(jobs.length).toBeGreaterThan(1) expect(jobs.find(j => j.id === 'job-custom-only')).toBeDefined() - expect(jobs.find(j => j.id === 'job-git-maintenance')).toBeDefined() expect(jobs.find(j => j.id === 'job-github-repo-maintenance')).toBeDefined() expect(jobs.find(j => j.id === 'job-brain-processing')).toBeDefined() }) diff --git a/server/services/brainScheduler.js b/server/services/brainScheduler.js index 92e7a66d..ce37a3bb 100644 --- a/server/services/brainScheduler.js +++ b/server/services/brainScheduler.js @@ -14,6 +14,11 @@ import { runDailyDigest, runWeeklyReview } from './brain.js'; let schedulerInterval = null; let lastCheckTime = null; const CHECK_INTERVAL_MS = 60000; // Check every minute +const FAILURE_COOLDOWN_MS = 30 * 60 * 1000; // 30 minutes cooldown after failure + +// Track failures to prevent retry spam +let lastDailyFailure = null; +let lastWeeklyFailure = null; // Day name to number mapping const DAY_MAP = { @@ -53,10 +58,21 @@ function isWeeklyReviewTime(settings, now) { now.getMinutes() === minutes; } +/** + * Check if we're in cooldown after a failure + */ +function isInCooldown(lastFailure, now) { + if (!lastFailure) return false; + return (now.getTime() - lastFailure.getTime()) < FAILURE_COOLDOWN_MS; +} + /** * Check if daily digest was missed (should have run today but didn't) */ function isDailyDigestMissed(settings, now) { + // Respect cooldown after failure + if (isInCooldown(lastDailyFailure, now)) return false; + const { hours, minutes } = parseTime(settings.dailyDigestTime); // Create target time for today @@ -78,6 +94,9 @@ function isDailyDigestMissed(settings, now) { * Check if weekly review was missed */ function isWeeklyReviewMissed(settings, now) { + // Respect cooldown after failure + if (isInCooldown(lastWeeklyFailure, now)) return false; + // Never run before — catch up now if (!settings.lastWeeklyReview) return true; @@ -104,12 +123,14 @@ async function checkSchedule() { if (isDailyDigestTime(settings, now)) { console.log('🧠 
Scheduler: Running daily digest...'); runDailyDigest().catch(err => { + lastDailyFailure = new Date(); console.error(`🧠 Scheduler: Daily digest failed: ${err.message}`); }); } else if (isDailyDigestMissed(settings, now)) { console.log('🧠 Scheduler: Running missed daily digest (catch-up)...'); runDailyDigest().catch(err => { - console.error(`🧠 Scheduler: Catch-up daily digest failed: ${err.message}`); + lastDailyFailure = new Date(); + console.error(`🧠 Scheduler: Catch-up daily digest failed: ${err.message} (retry in 30min)`); }); } @@ -117,12 +138,14 @@ async function checkSchedule() { if (isWeeklyReviewTime(settings, now)) { console.log('🧠 Scheduler: Running weekly review...'); runWeeklyReview().catch(err => { + lastWeeklyFailure = new Date(); console.error(`🧠 Scheduler: Weekly review failed: ${err.message}`); }); } else if (isWeeklyReviewMissed(settings, now)) { console.log('🧠 Scheduler: Running missed weekly review (catch-up)...'); runWeeklyReview().catch(err => { - console.error(`🧠 Scheduler: Catch-up weekly review failed: ${err.message}`); + lastWeeklyFailure = new Date(); + console.error(`🧠 Scheduler: Catch-up weekly review failed: ${err.message} (retry in 30min)`); }); } } diff --git a/server/services/brainStorage.js b/server/services/brainStorage.js index d9d52e07..94631d08 100644 --- a/server/services/brainStorage.js +++ b/server/services/brainStorage.js @@ -584,6 +584,7 @@ export async function getSummary() { inbox: inboxCounts }, activeProjects: projects.filter(p => p.status === 'active').length, + activeIdeas: ideas.filter(i => !i.status || i.status === 'active').length, openAdmin: adminItems.filter(a => a.status === 'open').length, gitHubRepos: links.filter(l => l.isGitHubRepo).length, needsReview: inboxCounts.needs_review, diff --git a/server/services/cos.js b/server/services/cos.js index 218c552c..9665ad5e 100644 --- a/server/services/cos.js +++ b/server/services/cos.js @@ -9,7 +9,7 @@ import { readFile, writeFile, mkdir, readdir, rm } from 'fs/promises'; import { existsSync } from 'fs'; import { join, dirname } from 'path'; import { fileURLToPath } from 'url'; -import { exec } from 'child_process'; +import { exec, execFile } from 'child_process'; import { promisify } from 'util'; import { v4 as uuidv4 } from 'uuid'; import { getActiveProvider } from './providers.js'; @@ -27,6 +27,7 @@ import { cosEvents as _cosEvents } from './cosEvents.js'; export const cosEvents = _cosEvents; const execAsync = promisify(exec); +const execFileAsync = promisify(execFile); const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -1968,14 +1969,40 @@ export async function runHealthCheck() { }); } - // Check for errored processes + // Check for errored processes and auto-restart them const erroredProcesses = pm2Processes.filter(p => p.pm2_env?.status === 'errored'); if (erroredProcesses.length > 0) { - issues.push({ - type: 'error', - category: 'processes', - message: `${erroredProcesses.length} errored PM2 processes: ${erroredProcesses.map(p => p.name).join(', ')}` - }); + const names = erroredProcesses.map(p => p.name); + emitLog('warn', `🔄 ${names.length} errored PM2 process(es) detected: ${names.join(', ')} — attempting restart`); + + const restartResults = await Promise.all(names.map(async (name) => { + const result = await execFileAsync('pm2', ['restart', name]).catch(e => ({ stdout: '', stderr: e.message })); + const failed = result.stderr && !result.stdout; + if (failed) { + emitLog('error', `❌ Failed to restart ${name}: ${result.stderr}`); + } else 
{ + emitLog('success', `✅ Auto-restarted errored process: ${name}`); + } + return { name, success: !failed }; + })); + + const failedRestarts = restartResults.filter(r => !r.success); + if (failedRestarts.length > 0) { + issues.push({ + type: 'error', + category: 'processes', + message: `${failedRestarts.length} errored PM2 process(es) failed to auto-restart: ${failedRestarts.map(r => r.name).join(', ')}` + }); + } + + const succeededRestarts = restartResults.filter(r => r.success); + if (succeededRestarts.length > 0) { + issues.push({ + type: 'warning', + category: 'processes', + message: `Auto-restarted ${succeededRestarts.length} errored PM2 process(es): ${succeededRestarts.map(r => r.name).join(', ')}` + }); + } } // Check memory usage per process @@ -2349,6 +2376,112 @@ export async function deleteAgent(agentId) { }); } +/** + * Submit feedback for a completed agent + * @param {string} agentId - Agent ID + * @param {object} feedback - { rating: 'positive'|'negative'|'neutral', comment?: string } + */ +export async function submitAgentFeedback(agentId, feedback) { + return withStateLock(async () => { + const state = await loadState(); + + if (!state.agents[agentId]) { + return { error: 'Agent not found' }; + } + + const agent = state.agents[agentId]; + if (agent.status !== 'completed') { + return { error: 'Can only submit feedback for completed agents' }; + } + + // Store feedback on the agent + state.agents[agentId].feedback = { + rating: feedback.rating, + comment: feedback.comment || null, + submittedAt: new Date().toISOString() + }; + + await saveState(state); + + emitLog('info', `Feedback received for agent ${agentId}: ${feedback.rating}`, { agentId, rating: feedback.rating }); + cosEvents.emit('agent:feedback', { agentId, feedback: state.agents[agentId].feedback }); + + return { success: true, agent: state.agents[agentId] }; + }); +} + +/** + * Get aggregated feedback statistics + */ +export async function getFeedbackStats() { + const state = await loadState(); + const agents = Object.values(state.agents); + + const withFeedback = agents.filter(a => a.feedback); + const positive = withFeedback.filter(a => a.feedback.rating === 'positive').length; + const negative = withFeedback.filter(a => a.feedback.rating === 'negative').length; + const neutral = withFeedback.filter(a => a.feedback.rating === 'neutral').length; + + // Group by task type + const byTaskType = {}; + withFeedback.forEach(a => { + const taskType = extractTaskType(a.metadata?.taskDescription); + if (!byTaskType[taskType]) { + byTaskType[taskType] = { positive: 0, negative: 0, neutral: 0, total: 0 }; + } + byTaskType[taskType][a.feedback.rating]++; + byTaskType[taskType].total++; + }); + + // Recent feedback (last 10 with comments) + const recentWithComments = withFeedback + .filter(a => a.feedback.comment) + .sort((a, b) => new Date(b.feedback.submittedAt) - new Date(a.feedback.submittedAt)) + .slice(0, 10) + .map(a => ({ + agentId: a.id, + taskDescription: a.metadata?.taskDescription, + rating: a.feedback.rating, + comment: a.feedback.comment, + submittedAt: a.feedback.submittedAt + })); + + const satisfactionRate = withFeedback.length > 0 + ? 
Math.round((positive / withFeedback.length) * 100) + : null; + + return { + total: withFeedback.length, + positive, + negative, + neutral, + satisfactionRate, + byTaskType, + recentWithComments + }; +} + +// Helper to extract task type from description (mirrors client-side logic) +function extractTaskType(description) { + if (!description) return 'general'; + const d = description.toLowerCase(); + if (d.includes('fix') || d.includes('bug') || d.includes('error') || d.includes('issue')) return 'bug-fix'; + if (d.includes('refactor') || d.includes('clean up') || d.includes('improve') || d.includes('optimize')) return 'refactor'; + if (d.includes('test')) return 'testing'; + if (d.includes('document') || d.includes('readme') || d.includes('docs')) return 'documentation'; + if (d.includes('review') || d.includes('audit')) return 'code-review'; + if (d.includes('mobile') || d.includes('responsive')) return 'mobile-responsive'; + if (d.includes('security') || d.includes('vulnerability')) return 'security'; + if (d.includes('performance') || d.includes('speed')) return 'performance'; + if (d.includes('ui') || d.includes('ux') || d.includes('design') || d.includes('style')) return 'ui-ux'; + if (d.includes('api') || d.includes('endpoint') || d.includes('route')) return 'api'; + if (d.includes('database') || d.includes('migration')) return 'database'; + if (d.includes('deploy') || d.includes('ci') || d.includes('cd')) return 'devops'; + if (d.includes('investigate') || d.includes('debug')) return 'investigation'; + if (d.includes('self-improvement') || d.includes('feature idea')) return 'self-improvement'; + return 'feature'; +} + /** * Generate daily report */ diff --git a/server/services/digital-twin.js b/server/services/digital-twin.js index 71dc3609..b5f61f47 100644 --- a/server/services/digital-twin.js +++ b/server/services/digital-twin.js @@ -1138,11 +1138,13 @@ export async function getEnrichmentProgress() { const progress = {}; for (const cat of categories) { + const config = ENRICHMENT_CATEGORIES[cat]; const answered = meta.enrichment.questionsAnswered?.[cat] || 0; - const baseQuestions = ENRICHMENT_CATEGORIES[cat].questions.length; + const baseQuestions = config.questions.length; progress[cat] = { answered, baseQuestions, + listBased: !!config.listBased, completed: meta.enrichment.completedCategories.includes(cat), percentage: Math.min(100, Math.round((answered / baseQuestions) * 100)) }; @@ -1293,6 +1295,11 @@ export async function saveEnrichmentListDocument(category, content, items) { meta.enrichment.listItems = {}; } meta.enrichment.listItems[category] = items; + + // Track items as answered questions so progress displays correctly + if (!meta.enrichment.questionsAnswered) meta.enrichment.questionsAnswered = {}; + meta.enrichment.questionsAnswered[category] = items.length; + meta.enrichment.lastSession = now(); // Ensure document is in meta diff --git a/server/services/goalProgress.js b/server/services/goalProgress.js new file mode 100644 index 00000000..9e423988 --- /dev/null +++ b/server/services/goalProgress.js @@ -0,0 +1,241 @@ +/** + * Goal Progress Service + * + * Tracks progress toward user goals defined in COS-GOALS.md by analyzing + * completed CoS tasks and mapping them to goal categories. + * + * Goals are extracted from COS-GOALS.md's Active Goals section. + * Task completions are categorized by keywords and mapped to goal progress. 
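+ *
+ * Expected COS-GOALS.md shape (as parsed by parseGoalsFile below):
+ *   ## Active Goals
+ *   ### Goal 1: Codebase Quality
+ *   - goal item text
+ * Goal names are matched against GOAL_MAPPINGS; unrecognized names fall back
+ * to a generic 🎯/gray mapping with no keywords or task types.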
+ */ + +import { readFile } from 'fs/promises' +import { join } from 'path' +import { readJSONFile, PATHS } from '../lib/fileUtils.js' + +const GOALS_FILE = join(PATHS.data, 'COS-GOALS.md') +const LEARNING_FILE = join(PATHS.cos, 'learning.json') + +/** + * Goal category mappings - maps goal names to task type patterns + * Each goal has keywords that match against task types and descriptions + */ +const GOAL_MAPPINGS = { + 'Codebase Quality': { + icon: '🔧', + color: 'emerald', + keywords: ['security', 'audit', 'mobile', 'responsive', 'dry', 'dead-code', 'test', 'coverage', 'console', 'lint', 'refactor'], + taskTypes: ['self-improve:security-audit', 'self-improve:mobile-responsive', 'self-improve:feature', 'app-improve:security', 'app-improve:mobile'] + }, + 'Self-Improvement': { + icon: '🧠', + color: 'purple', + keywords: ['capability', 'improvement', 'learn', 'analysis', 'error', 'retry', 'prioritization', 'a11y', 'i18n', 'seo'], + taskTypes: ['self-improve:general', 'self-improve:brainstorm', 'idle-review'] + }, + 'Documentation': { + icon: '📚', + color: 'blue', + keywords: ['document', 'docs', 'readme', 'plan', 'report', 'summary', 'changelog'], + taskTypes: ['self-improve:documentation', 'app-improve:documentation'] + }, + 'User Engagement': { + icon: '💬', + color: 'pink', + keywords: ['feedback', 'suggest', 'goal', 'status', 'clarify', 'user', 'engagement'], + taskTypes: ['user-task'] + }, + 'System Health': { + icon: '💚', + color: 'green', + keywords: ['health', 'pm2', 'memory', 'performance', 'monitor', 'alert', 'process', 'service'], + taskTypes: ['auto-fix', 'internal-task'] + } +} + +/** + * Parse COS-GOALS.md to extract active goals + * @returns {Promise} Parsed goals with titles and items + */ +async function parseGoalsFile() { + const content = await readFile(GOALS_FILE, 'utf-8').catch(() => null) + if (!content) return [] + + const goals = [] + const lines = content.split('\n') + let inActiveGoals = false + let currentGoal = null + + for (const line of lines) { + // Detect Active Goals section + if (line.startsWith('## Active Goals')) { + inActiveGoals = true + continue + } + + // Stop at next major section + if (inActiveGoals && line.startsWith('## ') && !line.includes('Active Goals')) { + inActiveGoals = false + continue + } + + if (!inActiveGoals) continue + + // Parse goal headers (### Goal N: Name) + const goalMatch = line.match(/^### Goal \d+:\s*(.+)/) + if (goalMatch) { + if (currentGoal) goals.push(currentGoal) + const name = goalMatch[1].trim() + currentGoal = { + name, + items: [], + mapping: GOAL_MAPPINGS[name] || { icon: '🎯', color: 'gray', keywords: [], taskTypes: [] } + } + continue + } + + // Parse goal items (- item text) + if (currentGoal && line.match(/^- /)) { + currentGoal.items.push(line.replace(/^- /, '').trim()) + } + } + + if (currentGoal) goals.push(currentGoal) + return goals +} + +/** + * Get task completion statistics from learning data + * @returns {Promise} Task completion stats by type + */ +async function getTaskStats() { + const learning = await readJSONFile(LEARNING_FILE, null) + if (!learning?.byTaskType) return {} + return learning.byTaskType +} + +/** + * Calculate progress for each goal based on completed tasks + * @param {Array} goals - Parsed goals from COS-GOALS.md + * @param {Object} taskStats - Task completion statistics + * @returns {Array} Goals with progress metrics + */ +function calculateGoalProgress(goals, taskStats) { + return goals.map(goal => { + let totalTasks = 0 + let succeededTasks = 0 + + // Sum up tasks matching this 
goal's task types + for (const taskType of goal.mapping.taskTypes) { + const stats = taskStats[taskType] + if (stats) { + totalTasks += stats.completed || 0 + succeededTasks += stats.succeeded || 0 + } + } + + // Also check for keyword matches in other task types + for (const [taskType, stats] of Object.entries(taskStats)) { + // Skip if already counted by taskTypes + if (goal.mapping.taskTypes.includes(taskType)) continue + + // Check if task type contains any of this goal's keywords + const hasKeyword = goal.mapping.keywords.some(kw => + taskType.toLowerCase().includes(kw.toLowerCase()) + ) + if (hasKeyword) { + totalTasks += stats.completed || 0 + succeededTasks += stats.succeeded || 0 + } + } + + // Calculate success rate + const successRate = totalTasks > 0 ? Math.round((succeededTasks / totalTasks) * 100) : null + + // Estimate engagement level (low/medium/high based on task count) + let engagement = 'low' + if (totalTasks >= 20) engagement = 'high' + else if (totalTasks >= 5) engagement = 'medium' + + return { + name: goal.name, + icon: goal.mapping.icon, + color: goal.mapping.color, + itemCount: goal.items.length, + metrics: { + totalTasks, + succeededTasks, + successRate, + engagement + } + } + }) +} + +/** + * Get goal progress summary for dashboard display + * @returns {Promise} Goal progress data + */ +async function getGoalProgress() { + const [goals, taskStats] = await Promise.all([ + parseGoalsFile(), + getTaskStats() + ]) + + const goalsWithProgress = calculateGoalProgress(goals, taskStats) + + // Calculate overall stats + const totalTasks = goalsWithProgress.reduce((sum, g) => sum + g.metrics.totalTasks, 0) + const totalSucceeded = goalsWithProgress.reduce((sum, g) => sum + g.metrics.succeededTasks, 0) + + // Find most and least engaged goals + const sorted = [...goalsWithProgress].sort((a, b) => b.metrics.totalTasks - a.metrics.totalTasks) + const mostActive = sorted[0]?.name || null + const leastActive = sorted[sorted.length - 1]?.name || null + + return { + goals: goalsWithProgress, + summary: { + totalGoals: goals.length, + totalTasks, + totalSucceeded, + overallSuccessRate: totalTasks > 0 ? Math.round((totalSucceeded / totalTasks) * 100) : null, + mostActive, + leastActive: leastActive !== mostActive ? leastActive : null + }, + updatedAt: new Date().toISOString() + } +} + +/** + * Get a brief summary suitable for dashboard widget + * @returns {Promise} Compact goal progress summary + */ +async function getGoalProgressSummary() { + const progress = await getGoalProgress() + + // Return top 5 goals by activity for compact display + const topGoals = progress.goals + .sort((a, b) => b.metrics.totalTasks - a.metrics.totalTasks) + .slice(0, 5) + .map(g => ({ + name: g.name, + icon: g.icon, + color: g.color, + tasks: g.metrics.totalTasks, + successRate: g.metrics.successRate, + engagement: g.metrics.engagement + })) + + return { + goals: topGoals, + summary: progress.summary, + updatedAt: progress.updatedAt + } +} + +export { + getGoalProgress, + getGoalProgressSummary, + parseGoalsFile, + GOAL_MAPPINGS +} diff --git a/server/services/moltworldQueue.js b/server/services/moltworldQueue.js new file mode 100644 index 00000000..ebfd7a2b --- /dev/null +++ b/server/services/moltworldQueue.js @@ -0,0 +1,155 @@ +/** + * Moltworld Action Queue Service + * + * In-memory queue per agentId for scheduling actions that the + * explore script (or other consumers) can pick up and execute. 
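+ *
+ * Item lifecycle: pending -> executing (popNext) -> completed (markCompleted)
+ * or failed (markFailed). Each per-agent queue is capped at MAX_ITEMS_PER_AGENT;
+ * only completed/failed items are evicted once the cap is exceeded.
+ *
+ * Consumer loop sketch (runAction is a placeholder for the caller's executor):
+ *   const item = popNext(agentId);
+ *   if (item) {
+ *     try { await runAction(item); markCompleted(item.id); }
+ *     catch (e) { markFailed(item.id, e.message); }
+ *   }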
+ * + * Events emitted on queueEvents: + * added - new item added to a queue + * updated - item status changed (executing, completed, failed) + * removed - item cancelled/removed + */ + +import { randomUUID } from 'crypto'; +import EventEmitter from 'events'; + +export const queueEvents = new EventEmitter(); + +// Map +const queues = new Map(); + +const MAX_ITEMS_PER_AGENT = 100; + +function getOrCreateQueue(agentId) { + if (!queues.has(agentId)) { + queues.set(agentId, []); + } + return queues.get(agentId); +} + +function evictCompleted(queue) { + if (queue.length <= MAX_ITEMS_PER_AGENT) return; + // Remove oldest completed/failed items first, iterating in reverse to avoid index shifting + for (let i = queue.length - 1; i >= 0 && queue.length > MAX_ITEMS_PER_AGENT; i--) { + if (queue[i].status === 'completed' || queue[i].status === 'failed') { + queue.splice(i, 1); + } + } +} + +/** + * Get non-completed items for an agent (pending + executing) + */ +export function getQueue(agentId) { + const queue = queues.get(agentId) || []; + return queue.filter(item => item.status === 'pending' || item.status === 'executing'); +} + +/** + * Get all items for an agent (including completed/failed, for display) + */ +export function getFullQueue(agentId) { + return queues.get(agentId) || []; +} + +/** + * Add an action to the queue + */ +export function addAction(agentId, actionType, params = {}, scheduledFor = null) { + const queue = getOrCreateQueue(agentId); + const pendingCount = queue.filter(i => i.status === 'pending' || i.status === 'executing').length; + if (pendingCount >= MAX_ITEMS_PER_AGENT) { + throw new Error(`Queue full: ${pendingCount} pending/executing items (max ${MAX_ITEMS_PER_AGENT})`); + } + const item = { + id: randomUUID(), + agentId, + actionType, + params, + status: 'pending', + scheduledFor: scheduledFor || null, + createdAt: new Date().toISOString(), + completedAt: null, + error: null + }; + queue.push(item); + evictCompleted(queue); + console.log(`📋 Queue: added ${actionType} for agent=${agentId} id=${item.id}`); + queueEvents.emit('added', item); + return item; +} + +/** + * Pop the next pending item (FIFO), mark it as executing + */ +export function popNext(agentId) { + const queue = queues.get(agentId) || []; + const now = new Date().toISOString(); + const idx = queue.findIndex(item => + item.status === 'pending' && + (!item.scheduledFor || item.scheduledFor <= now) + ); + if (idx === -1) return null; + + queue[idx].status = 'executing'; + const item = queue[idx]; + console.log(`📋 Queue: executing ${item.actionType} id=${item.id}`); + queueEvents.emit('updated', item); + return item; +} + +/** + * Mark an item as completed + */ +export function markCompleted(itemId) { + for (const queue of queues.values()) { + const item = queue.find(i => i.id === itemId); + if (item) { + item.status = 'completed'; + item.completedAt = new Date().toISOString(); + console.log(`📋 Queue: completed ${item.actionType} id=${itemId}`); + queueEvents.emit('updated', item); + return item; + } + } + return null; +} + +/** + * Mark an item as failed + */ +export function markFailed(itemId, error) { + for (const queue of queues.values()) { + const item = queue.find(i => i.id === itemId); + if (item) { + item.status = 'failed'; + item.completedAt = new Date().toISOString(); + item.error = error; + console.log(`📋 Queue: failed ${item.actionType} id=${itemId} error=${error}`); + queueEvents.emit('updated', item); + return item; + } + } + return null; +} + +/** + * Remove a pending item (cancel) + */ 
+export function removeAction(itemId) { + for (const [agentId, queue] of queues.entries()) { + const idx = queue.findIndex(i => i.id === itemId); + if (idx !== -1) { + const [item] = queue.splice(idx, 1); + if (item.status !== 'pending') { + // Can only cancel pending items; put it back + queue.splice(idx, 0, item); + return null; + } + console.log(`📋 Queue: removed ${item.actionType} id=${itemId}`); + queueEvents.emit('removed', { id: itemId, agentId }); + return item; + } + } + return null; +} diff --git a/server/services/moltworldWs.js b/server/services/moltworldWs.js new file mode 100644 index 00000000..5651ffe9 --- /dev/null +++ b/server/services/moltworldWs.js @@ -0,0 +1,338 @@ +/** + * Moltworld WebSocket Client Service + * + * Manages a single WebSocket connection to Moltworld's real-time event stream. + * Emits events through an EventEmitter that gets forwarded via Socket.IO to clients. + * + * Events emitted on moltworldWsEvents: + * status - connection state changes ({ status, connectedAt, ... }) + * event - all incoming events (raw parsed data) + * presence - agent presence snapshots + * thinking - agent thought events + * action - agent action events (move, build, etc.) + * interaction - agent-to-agent interactions + * nearby - nearby agent list updates + * hello_ack - server acknowledged our hello + */ + +import WebSocket from 'ws'; +import EventEmitter from 'events'; +import * as platformAccounts from './platformAccounts.js'; +import * as agentActivity from './agentActivity.js'; + +export const moltworldWsEvents = new EventEmitter(); + +// Connection state +let ws = null; +let reconnectTimer = null; +let reconnectAttempts = 0; +let connectedAt = null; +let lastEvent = null; +let currentStatus = 'disconnected'; +let isReconnecting = false; +let currentAccountId = null; +let currentPortosAgentId = null; // PortOS agent personality ID (for activity logging) +let currentMoltworldAgentId = null; // Moltworld credential ID (for WS protocol) +let currentAgentName = null; + +const MAX_RECONNECT_DELAY_MS = 60000; +const BASE_RECONNECT_DELAY_MS = 2000; +const CONNECT_TIMEOUT_MS = 10000; + +function setStatus(status) { + currentStatus = status; + const stateSnapshot = getStatus(); + moltworldWsEvents.emit('status', stateSnapshot); + console.log(`🌐 Moltworld WS: ${status}`); +} + +export function getStatus() { + return { + status: currentStatus, + connectedAt, + lastEvent, + reconnectAttempts, + portosAgentId: currentPortosAgentId, + moltworldAgentId: currentMoltworldAgentId, + agentName: currentAgentName, + accountId: currentAccountId + }; +} + +function clearReconnectTimer() { + if (reconnectTimer) { + clearTimeout(reconnectTimer); + reconnectTimer = null; + } +} + +function scheduleReconnect() { + clearReconnectTimer(); + const delay = Math.min( + BASE_RECONNECT_DELAY_MS * Math.pow(2, reconnectAttempts), + MAX_RECONNECT_DELAY_MS + ); + reconnectAttempts++; + setStatus('reconnecting'); + isReconnecting = true; + console.log(`🌐 Moltworld WS: reconnecting in ${Math.round(delay / 1000)}s (attempt ${reconnectAttempts})`); + reconnectTimer = setTimeout(() => doConnect().catch(err => { + console.error(`🌐 Moltworld WS: reconnect failed: ${err.message}`); + }), delay); +} + +function handleMessage(raw) { + // External data from Moltworld — parse guard is justified + let data; + try { + data = JSON.parse(raw); + } catch { + console.error(`🌐 Moltworld WS: invalid JSON received`); + return; + } + + lastEvent = Date.now(); + const eventType = data.type || data.event || 'unknown'; + + // Forward 
all events + moltworldWsEvents.emit('event', data); + + // Dispatch by type + if (eventType === 'presence' || eventType === 'presence_snapshot') { + moltworldWsEvents.emit('presence', data); + } else if (eventType === 'thinking' || eventType === 'thought') { + moltworldWsEvents.emit('thinking', data); + } else if (eventType === 'action' || eventType === 'move' || eventType === 'build') { + moltworldWsEvents.emit('action', data); + } else if (eventType === 'interaction' || eventType === 'message' || eventType === 'say') { + moltworldWsEvents.emit('interaction', data); + // Log interactions to activity service + if (currentPortosAgentId && data.agentId) { + agentActivity.logActivity({ + agentId: currentPortosAgentId, + accountId: currentAccountId, + action: 'mw_interaction', + params: { eventType, from: data.agentName || data.agentId }, + status: 'completed', + result: { type: eventType, content: data.message || data.thought || '' }, + timestamp: new Date().toISOString() + }).catch(() => {}); + } + } else if (eventType === 'nearby') { + moltworldWsEvents.emit('nearby', data); + } else if (eventType === 'hello_ack' || eventType === 'welcome') { + moltworldWsEvents.emit('hello_ack', data); + console.log(`🌐 Moltworld WS: hello acknowledged`); + } +} + +function cleanupWs() { + if (ws) { + ws.removeAllListeners(); + if (ws.readyState === WebSocket.OPEN) { + ws.close(); + } else if (ws.readyState === WebSocket.CONNECTING) { + ws.terminate(); // terminate() is safe for CONNECTING state; close() throws + } + ws = null; + } +} + +/** + * Open a WebSocket connection. Returns a promise that resolves on open + * or rejects on error/timeout. When called from reconnect, the promise + * result is ignored (fire-and-forget). + */ +function doConnect() { + if (ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) { + return Promise.resolve(); + } + + setStatus('connecting'); + + return new Promise((resolve, reject) => { + const timeout = setTimeout(() => { + console.error(`🌐 Moltworld WS: connection timed out after ${CONNECT_TIMEOUT_MS / 1000}s`); + cleanupWs(); + setStatus('disconnected'); + reject(new Error('WebSocket connection timed out — Moltworld may not have a WebSocket endpoint available')); + }, CONNECT_TIMEOUT_MS); + + const instance = new WebSocket('wss://moltworld.io/ws'); + ws = instance; + + instance.on('open', () => { + clearTimeout(timeout); + connectedAt = Date.now(); + reconnectAttempts = 0; + isReconnecting = false; + setStatus('connected'); + + // Send hello with agent credentials + send({ + type: 'hello', + agentId: currentMoltworldAgentId, + name: currentAgentName + }); + + resolve(); + }); + + instance.on('message', (raw) => handleMessage(String(raw))); + + instance.on('close', (code) => { + clearTimeout(timeout); + console.log(`🌐 Moltworld WS: closed (code=${code})`); + ws = null; + if (currentStatus !== 'disconnected') { + scheduleReconnect(); + } + }); + + instance.on('error', (err) => { + clearTimeout(timeout); + console.error(`🌐 Moltworld WS: error: ${err.message}`); + cleanupWs(); + if (isReconnecting) { + // Continue backoff loop on reconnect failures + scheduleReconnect(); + } else { + setStatus('disconnected'); + reject(new Error(`WebSocket connection failed: ${err.message}`)); + } + }); + }); +} + +/** + * Connect to Moltworld WebSocket relay. + * Looks up credentials from platformAccounts by accountId. + * Awaits the connection result — resolves on open, rejects on error/timeout. 
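+ *
+ * Usage sketch: await connect(accountId) once, then call sendMove/sendThink/
+ * sendInteract/sendNearby as needed; call disconnect() to close the socket
+ * and stop the reconnect backoff loop.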
+ */ +export async function connect(accountId) { + // Disconnect existing connection if any + disconnect(); + + const account = await platformAccounts.getAccountWithCredentials(accountId); + if (!account) { + throw new Error('Account not found'); + } + if (account.platform !== 'moltworld') { + throw new Error('Account is not a Moltworld account'); + } + if (account.status !== 'active') { + throw new Error(`Account not active: ${account.status}`); + } + + currentAccountId = accountId; + currentPortosAgentId = account.agentId; // PortOS personality ID for activity logging + currentMoltworldAgentId = account.credentials.agentId || account.credentials.apiKey; // Moltworld protocol ID + currentAgentName = account.credentials.username || 'Agent'; + + await doConnect(); +} + +/** + * Disconnect from Moltworld WebSocket + */ +export function disconnect() { + clearReconnectTimer(); + currentStatus = 'disconnected'; + isReconnecting = false; + connectedAt = null; + reconnectAttempts = 0; + currentPortosAgentId = null; + currentMoltworldAgentId = null; + cleanupWs(); + setStatus('disconnected'); +} + +/** + * Send a JSON message through the WebSocket + */ +export function send(message) { + if (!ws || ws.readyState !== WebSocket.OPEN) { + throw new Error('WebSocket not connected'); + } + ws.send(JSON.stringify(message)); +} + +/** + * Send a move command + */ +export function sendMove(x, y, thought) { + send({ + type: 'move', + agentId: currentMoltworldAgentId, + x, + y, + ...(thought ? { thinking: thought } : {}) + }); + if (currentPortosAgentId && currentAccountId) { + agentActivity.logActivity({ + agentId: currentPortosAgentId, + accountId: currentAccountId, + action: 'mw_explore', + params: { x, y, thinking: thought, via: 'ws' }, + status: 'completed', + result: { type: 'move' }, + timestamp: new Date().toISOString() + }).catch(() => {}); + } +} + +/** + * Send a think command + */ +export function sendThink(thought) { + send({ + type: 'think', + agentId: currentMoltworldAgentId, + thought + }); + if (currentPortosAgentId && currentAccountId) { + agentActivity.logActivity({ + agentId: currentPortosAgentId, + accountId: currentAccountId, + action: 'mw_think', + params: { thought, via: 'ws' }, + status: 'completed', + result: { type: 'think' }, + timestamp: new Date().toISOString() + }).catch(() => {}); + } +} + +/** + * Send an interaction to another agent + */ +export function sendInteract(toAgentId, payload) { + send({ + type: 'interact', + agentId: currentMoltworldAgentId, + to: toAgentId, + ...payload + }); + if (currentPortosAgentId && currentAccountId) { + agentActivity.logActivity({ + agentId: currentPortosAgentId, + accountId: currentAccountId, + action: 'mw_say', + params: { to: toAgentId, ...payload, via: 'ws' }, + status: 'completed', + result: { type: 'interact' }, + timestamp: new Date().toISOString() + }).catch(() => {}); + } +} + +/** + * Request nearby agents list + */ +export function sendNearby(radius) { + send({ + type: 'nearby', + agentId: currentMoltworldAgentId, + ...(radius ? 
{ radius } : {}) + }); +} diff --git a/server/services/platformAccounts.js b/server/services/platformAccounts.js index d70cf751..ed14e123 100644 --- a/server/services/platformAccounts.js +++ b/server/services/platformAccounts.js @@ -127,7 +127,8 @@ export async function createAccount(accountData) { platform: accountData.platform, credentials: { apiKey: accountData.credentials.apiKey, - username: accountData.credentials.username + username: accountData.credentials.username, + ...(accountData.credentials.agentId ? { agentId: accountData.credentials.agentId } : {}) }, status: accountData.status || 'pending', lastActivity: null, diff --git a/server/services/pm2.js b/server/services/pm2.js index 65951ff0..fc09c29a 100644 --- a/server/services/pm2.js +++ b/server/services/pm2.js @@ -2,8 +2,25 @@ import pm2 from 'pm2'; import { spawn } from 'child_process'; import { existsSync } from 'fs'; +/** + * Build environment object with optional custom PM2_HOME + * @param {string} pm2Home Optional custom PM2_HOME path + * @returns {object} Environment variables + */ +function buildEnv(pm2Home) { + const env = { ...process.env }; + if (pm2Home) { + env.PM2_HOME = pm2Home; + } + // Strip PortOS env vars to avoid conflicts + delete env.PORT; + delete env.HOST; + return env; +} + /** * Connect to PM2 daemon and run an action + * Note: This uses the default PM2_HOME. For custom PM2_HOME, use CLI commands. */ function connectAndRun(action) { return new Promise((resolve, reject) => { @@ -53,8 +70,26 @@ export async function startApp(name, options = {}) { /** * Stop an app * @param {string} name PM2 process name + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function stopApp(name) { +export async function stopApp(name, pm2Home = null) { + // Use CLI for custom PM2_HOME + if (pm2Home) { + return new Promise((resolve, reject) => { + const child = spawn('pm2', ['stop', name], { + shell: false, + env: buildEnv(pm2Home) + }); + let stderr = ''; + child.stderr.on('data', (data) => { stderr += data.toString(); }); + child.on('close', (code) => { + if (code !== 0) return reject(new Error(stderr || `pm2 stop exited with code ${code}`)); + resolve({ success: true }); + }); + child.on('error', reject); + }); + } + return connectAndRun((pm2) => { return new Promise((resolve, reject) => { pm2.stop(name, (err) => { @@ -68,8 +103,26 @@ export async function stopApp(name) { /** * Restart an app * @param {string} name PM2 process name + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function restartApp(name) { +export async function restartApp(name, pm2Home = null) { + // Use CLI for custom PM2_HOME + if (pm2Home) { + return new Promise((resolve, reject) => { + const child = spawn('pm2', ['restart', name], { + shell: false, + env: buildEnv(pm2Home) + }); + let stderr = ''; + child.stderr.on('data', (data) => { stderr += data.toString(); }); + child.on('close', (code) => { + if (code !== 0) return reject(new Error(stderr || `pm2 restart exited with code ${code}`)); + resolve({ success: true }); + }); + child.on('error', reject); + }); + } + return connectAndRun((pm2) => { return new Promise((resolve, reject) => { pm2.restart(name, (err) => { @@ -83,8 +136,26 @@ export async function restartApp(name) { /** * Delete an app from PM2 * @param {string} name PM2 process name + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function deleteApp(name) { +export async function deleteApp(name, pm2Home = null) { + // Use CLI for custom PM2_HOME + if (pm2Home) { + 
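+    // The programmatic pm2.connect() API only reaches the default daemon, so a
+    // custom PM2_HOME is handled by spawning the CLI with PM2_HOME set in env.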
return new Promise((resolve, reject) => { + const child = spawn('pm2', ['delete', name], { + shell: false, + env: buildEnv(pm2Home) + }); + let stderr = ''; + child.stderr.on('data', (data) => { stderr += data.toString(); }); + child.on('close', (code) => { + if (code !== 0) return reject(new Error(stderr || `pm2 delete exited with code ${code}`)); + resolve({ success: true }); + }); + child.on('error', reject); + }); + } + return connectAndRun((pm2) => { return new Promise((resolve, reject) => { pm2.delete(name, (err) => { @@ -98,10 +169,14 @@ export async function deleteApp(name) { /** * Get status of a specific process using CLI (avoids connection deadlocks) * @param {string} name PM2 process name + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function getAppStatus(name) { +export async function getAppStatus(name, pm2Home = null) { return new Promise((resolve) => { - const child = spawn('pm2', ['jlist'], { shell: false }); + const child = spawn('pm2', ['jlist'], { + shell: false, + env: buildEnv(pm2Home) + }); let stdout = ''; child.stdout.on('data', (data) => { @@ -109,7 +184,18 @@ export async function getAppStatus(name) { }); child.on('close', () => { - const processes = JSON.parse(stdout || '[]'); + // pm2 jlist may output ANSI codes and warnings before JSON + let jsonStart = stdout.indexOf('[{'); + if (jsonStart < 0) { + jsonStart = stdout.lastIndexOf('[]'); + } + const pm2Json = jsonStart >= 0 ? stdout.slice(jsonStart) : '[]'; + let processes; + try { + processes = JSON.parse(pm2Json); + } catch { + return resolve({ name, status: 'error', pm2_env: null }); + } const proc = processes.find(p => p.name === name); if (!proc) { @@ -141,10 +227,14 @@ export async function getAppStatus(name) { /** * List all PM2 processes using CLI (avoids connection deadlocks) + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function listProcesses() { +export async function listProcesses(pm2Home = null) { return new Promise((resolve) => { - const child = spawn('pm2', ['jlist'], { shell: false }); + const child = spawn('pm2', ['jlist'], { + shell: false, + env: buildEnv(pm2Home) + }); let stdout = ''; child.stdout.on('data', (data) => { @@ -184,11 +274,15 @@ export async function listProcesses() { * Get logs for a process using pm2 CLI (more reliable for log retrieval) * @param {string} name PM2 process name * @param {number} lines Number of lines to retrieve + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function getLogs(name, lines = 100) { +export async function getLogs(name, lines = 100, pm2Home = null) { return new Promise((resolve, reject) => { const args = ['logs', name, '--lines', String(lines), '--nostream', '--raw']; - const child = spawn('pm2', args, { shell: false }); + const child = spawn('pm2', args, { + shell: false, + env: buildEnv(pm2Home) + }); let stdout = ''; let stderr = ''; @@ -244,8 +338,9 @@ export async function startWithCommand(name, cwd, command) { * This properly uses all env vars, scripts, args defined in the config * @param {string} cwd Working directory containing ecosystem config * @param {string[]} processNames Optional: specific processes to start (--only flag) + * @param {string} pm2Home Optional custom PM2_HOME path */ -export async function startFromEcosystem(cwd, processNames = []) { +export async function startFromEcosystem(cwd, processNames = [], pm2Home = null) { return new Promise((resolve, reject) => { const ecosystemFile = ['ecosystem.config.cjs', 'ecosystem.config.js'] .find(f => 
existsSync(`${cwd}/${f}`)); @@ -259,13 +354,11 @@ export async function startFromEcosystem(cwd, processNames = []) { args.push('--only', processNames.join(',')); } - // Strip PortOS env vars so child ecosystem configs don't inherit them - // e.g., process.env.PORT || 4420 would resolve to PortOS's 5554 otherwise - const cleanEnv = { ...process.env }; - delete cleanEnv.PORT; - delete cleanEnv.HOST; - - const child = spawn('pm2', args, { cwd, shell: false, env: cleanEnv }); + const child = spawn('pm2', args, { + cwd, + shell: false, + env: buildEnv(pm2Home) + }); let stdout = ''; let stderr = ''; diff --git a/server/services/pm2Standardizer.js b/server/services/pm2Standardizer.js index 0620c298..d93969a6 100644 --- a/server/services/pm2Standardizer.js +++ b/server/services/pm2Standardizer.js @@ -167,6 +167,15 @@ async function executeCliAnalysis(provider, prompt, cwd) { return new Promise((resolve, reject) => { const args = [...(provider.args || []), prompt]; let output = ''; + let settled = false; + let timer = null; + + const settle = (fn, value) => { + if (settled) return; + settled = true; + if (timer) clearTimeout(timer); + fn(value); + }; const child = spawn(provider.command, args, { cwd, @@ -184,18 +193,20 @@ async function executeCliAnalysis(provider, prompt, cwd) { child.on('close', (code) => { if (code === 0) { - resolve(output); + settle(resolve, output); } else { - reject(new Error(`CLI exited with code ${code}`)); + settle(reject, new Error(`CLI exited with code ${code}`)); } }); - child.on('error', reject); + child.on('error', (err) => settle(reject, err)); - setTimeout(() => { + const timeoutMs = provider.timeout || 180000; + timer = setTimeout(() => { + console.log(`⏰ CLI analysis timed out after ${timeoutMs / 1000}s for: ${provider.command}`); child.kill(); - reject(new Error('Standardization analysis timed out')); - }, provider.timeout || 120000); + settle(reject, new Error(`Standardization analysis timed out after ${timeoutMs / 1000}s`)); + }, timeoutMs); }); } @@ -208,15 +219,20 @@ async function executeApiAnalysis(provider, prompt) { headers['Authorization'] = `Bearer ${provider.apiKey}`; } + const timeoutMs = provider.timeout || 180000; + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), timeoutMs); + const response = await fetch(`${provider.endpoint}/chat/completions`, { method: 'POST', headers, + signal: controller.signal, body: JSON.stringify({ model: provider.defaultModel, messages: [{ role: 'user', content: prompt }], temperature: 0.1 }) - }); + }).finally(() => clearTimeout(timer)); if (!response.ok) { throw new Error(`API error: ${response.status}`); @@ -346,6 +362,9 @@ export async function analyzeApp(repoPath, providerId = null) { const prompt = buildStandardizationPrompt(context); // Execute analysis + const startTime = Date.now(); + console.log(`🤖 Running ${provider.type} analysis via ${provider.name} (timeout: ${(provider.timeout || 180000) / 1000}s)`); + let response; if (provider.type === 'cli') { response = await executeCliAnalysis(provider, prompt, repoPath); @@ -355,6 +374,8 @@ export async function analyzeApp(repoPath, providerId = null) { return { success: false, error: 'Unknown provider type' }; } + console.log(`✅ Analysis response received in ${((Date.now() - startTime) / 1000).toFixed(1)}s`); + // Parse response const analysis = parseAnalysisResponse(response); diff --git a/server/services/productivity.js b/server/services/productivity.js index f3d3b371..f8437664 100644 --- 
a/server/services/productivity.js +++ b/server/services/productivity.js @@ -500,3 +500,92 @@ export async function getDailyTrends(days = 30) { } }; } + +/** + * Get activity calendar data for GitHub-style heatmap + * Returns last N weeks of daily activity in a format optimized for calendar display + * @param {number} weeks - Number of weeks to include (default: 12) + * @returns {Object} Calendar data with days organized by week + */ +export async function getActivityCalendar(weeks = 12) { + const data = await loadProductivity(); + const dailyHistory = data.dailyHistory || {}; + + // Calculate date range: from start of week N weeks ago to today + const today = new Date(); + const todayStr = getDateString(today); + + // Find the start of the range (weeks ago, aligned to Sunday) + const startDate = new Date(today); + startDate.setDate(startDate.getDate() - (weeks * 7) + 1); + // Align to Sunday + const startDayOfWeek = startDate.getDay(); + startDate.setDate(startDate.getDate() - startDayOfWeek); + + // Build calendar grid: array of weeks, each containing 7 days + const calendar = []; + let currentDate = new Date(startDate); + let currentWeek = []; + let maxTasks = 1; + + // Build calendar up through end of today's week (Saturday) for a complete grid + const endOfWeek = new Date(today); + endOfWeek.setDate(endOfWeek.getDate() + (6 - endOfWeek.getDay())); + + while (currentDate <= endOfWeek) { + const dateStr = getDateString(currentDate); + const isFuture = currentDate > today; + const dayData = isFuture ? { tasks: 0, successes: 0, failures: 0, successRate: 0 } : + (dailyHistory[dateStr] || { tasks: 0, successes: 0, failures: 0, successRate: 0 }); + + if (dayData.tasks > maxTasks) { + maxTasks = dayData.tasks; + } + + currentWeek.push({ + date: dateStr, + dayOfWeek: currentDate.getDay(), + tasks: dayData.tasks, + successes: dayData.successes, + failures: dayData.failures, + successRate: dayData.successRate, + isToday: dateStr === todayStr, + isFuture + }); + + // Start new week on Sunday + if (currentDate.getDay() === 6) { + calendar.push(currentWeek); + currentWeek = []; + } + + currentDate.setDate(currentDate.getDate() + 1); + } + + // Add remaining days if any + if (currentWeek.length > 0) { + calendar.push(currentWeek); + } + + // Calculate summary stats + const allDays = calendar.flat(); + const activeDays = allDays.filter(d => d.tasks > 0); + const totalTasks = activeDays.reduce((sum, d) => sum + d.tasks, 0); + const totalSuccesses = activeDays.reduce((sum, d) => sum + d.successes, 0); + + return { + weeks: calendar, + maxTasks, + summary: { + totalDays: allDays.length, + activeDays: activeDays.length, + totalTasks, + totalSuccesses, + successRate: totalTasks > 0 ? Math.round((totalSuccesses / totalTasks) * 100) : 0, + avgTasksPerActiveDay: activeDays.length > 0 + ? 
Math.round((totalTasks / activeDays.length) * 10) / 10 + : 0 + }, + currentStreak: data.streaks?.currentDaily || 0 + }; +} diff --git a/server/services/socket.js b/server/services/socket.js index 136d4bad..c7fb2bb6 100644 --- a/server/services/socket.js +++ b/server/services/socket.js @@ -12,6 +12,8 @@ import { platformAccountEvents } from './platformAccounts.js'; import { scheduleEvents } from './automationScheduler.js'; import { activityEvents } from './agentActivity.js'; import { brainEvents } from './brainStorage.js'; +import { moltworldWsEvents } from './moltworldWs.js'; +import { queueEvents } from './moltworldQueue.js'; import * as shellService from './shell.js'; // Store active log streams per socket @@ -296,6 +298,12 @@ export function initSocket(io) { // Set up brain event forwarding setupBrainEventForwarding(); + + // Set up Moltworld WebSocket event forwarding + setupMoltworldWsEventForwarding(); + + // Set up Moltworld queue event forwarding + setupMoltworldQueueEventForwarding(); } function cleanupStream(socketId) { @@ -446,3 +454,21 @@ function setupBrainEventForwarding() { } }); } + +// Set up Moltworld WebSocket event forwarding to agent subscribers +function setupMoltworldWsEventForwarding() { + moltworldWsEvents.on('status', (data) => broadcastToAgents('moltworld:status', data)); + moltworldWsEvents.on('event', (data) => broadcastToAgents('moltworld:event', data)); + moltworldWsEvents.on('presence', (data) => broadcastToAgents('moltworld:presence', data)); + moltworldWsEvents.on('thinking', (data) => broadcastToAgents('moltworld:thinking', data)); + moltworldWsEvents.on('action', (data) => broadcastToAgents('moltworld:action', data)); + moltworldWsEvents.on('interaction', (data) => broadcastToAgents('moltworld:interaction', data)); + moltworldWsEvents.on('nearby', (data) => broadcastToAgents('moltworld:nearby', data)); +} + +// Set up Moltworld queue event forwarding to agent subscribers +function setupMoltworldQueueEventForwarding() { + queueEvents.on('added', (data) => broadcastToAgents('moltworld:queue:added', data)); + queueEvents.on('updated', (data) => broadcastToAgents('moltworld:queue:updated', data)); + queueEvents.on('removed', (data) => broadcastToAgents('moltworld:queue:removed', data)); +} diff --git a/server/services/streamingDetect.js b/server/services/streamingDetect.js index c7c1c816..eaeed687 100644 --- a/server/services/streamingDetect.js +++ b/server/services/streamingDetect.js @@ -1,6 +1,7 @@ import { readFile, readdir, stat } from 'fs/promises'; import { existsSync } from 'fs'; import { join, basename } from 'path'; +import { homedir } from 'os'; import { exec } from 'child_process'; import { promisify } from 'util'; import { safeJSONParse } from '../lib/fileUtils.js'; @@ -10,9 +11,42 @@ const execAsync = promisify(exec); /** * Parse ecosystem.config.js/cjs to extract all processes with their ports * Uses regex parsing since we can't safely execute arbitrary JS + * @returns {{ processes: Array, pm2Home: string|null }} */ export function parseEcosystemConfig(content) { const processes = []; + let pm2Home = null; + + // Extract PM2_HOME constant if defined (e.g., const PM2_HOME = `${require("os").homedir()}/.pm2-grace`) + // First try template literals (backticks) which can contain nested quotes + const templateMatch = content.match(/(?:const|let|var)\s+PM2_HOME\s*=\s*`([^`]+)`/); + if (templateMatch) { + let homePath = templateMatch[1]; + // Replace common template expressions + homePath = homePath.replace(/\$\{require\(['"]os['"]\)\.homedir\(\)\}/g, 
homedir()); + homePath = homePath.replace(/\$\{require\(['"]os['"]\)\.userInfo\(\)\.username\}/g, process.env.USER || 'user'); + homePath = homePath.replace(/\$\{process\.env\.HOME\}/g, homedir()); + pm2Home = homePath; + } else { + // Try regular string literals + const stringMatch = content.match(/(?:const|let|var)\s+PM2_HOME\s*=\s*['"]([^'"]+)['"]/); + if (stringMatch) { + pm2Home = stringMatch[1]; + } else { + // Check for PM2_HOME in env blocks + const envPm2HomeMatch = content.match(/PM2_HOME\s*:\s*PM2_HOME/); + if (envPm2HomeMatch) { + // PM2_HOME is used but defined as a variable - try template literal first + const varTemplateMatch = content.match(/PM2_HOME\s*=\s*`([^`]+)`/); + if (varTemplateMatch) { + let homePath = varTemplateMatch[1]; + homePath = homePath.replace(/\$\{require\(['"]os['"]\)\.homedir\(\)\}/g, homedir()); + homePath = homePath.replace(/\$\{require\(['"]os['"]\)\.userInfo\(\)\.username\}/g, process.env.USER || 'user'); + pm2Home = homePath; + } + } + } + } // Extract top-level port constants (e.g., const CDP_PORT = 5549) const portConstants = {}; @@ -248,7 +282,7 @@ export function parseEcosystemConfig(content) { lastIndex = endPos; } - return processes; + return { processes, pm2Home }; } /** @@ -262,13 +296,14 @@ function extractVitePort(content) { /** * Parse ecosystem config from a directory path (non-streaming, for refresh) * Also checks vite.config files in subdirectories for processes that use Vite + * @returns {{ processes: Array, pm2Home: string|null }} */ export async function parseEcosystemFromPath(dirPath) { for (const ecosystemFile of ['ecosystem.config.js', 'ecosystem.config.cjs']) { const ecosystemPath = join(dirPath, ecosystemFile); if (existsSync(ecosystemPath)) { const content = await readFile(ecosystemPath, 'utf-8'); - const processes = parseEcosystemConfig(content); + const { processes, pm2Home } = parseEcosystemConfig(content); // For processes that use vite and don't have a port, check their cwd for vite.config for (const proc of processes) { @@ -291,10 +326,10 @@ export async function parseEcosystemFromPath(dirPath) { delete proc.usesVite; } - return processes; + return { processes, pm2Home }; } } - return []; + return { processes: [], pm2Home: null }; } /** @@ -314,6 +349,7 @@ export async function streamDetection(socket, dirPath) { pm2ProcessNames: [], pm2Status: null, processes: [], + pm2Home: null, type: 'unknown' }; @@ -414,7 +450,10 @@ export async function streamDetection(socket, dirPath) { const content = await readFile(ecosystemPath, 'utf-8').catch(() => ''); if (content) { // Parse all processes with their ports using the dedicated parser - const parsedProcesses = parseEcosystemConfig(content); + const { processes: parsedProcesses, pm2Home } = parseEcosystemConfig(content); + if (pm2Home) { + result.pm2Home = pm2Home; + } if (parsedProcesses.length > 0) { // For processes that use vite and don't have a port, check their cwd for vite.config for (const proc of parsedProcesses) { @@ -469,8 +508,18 @@ export async function streamDetection(socket, dirPath) { // Step 5: Check PM2 status emit('pm2', 'running', { message: 'Checking PM2 processes...' }); - const { stdout } = await execAsync('pm2 jlist').catch(() => ({ stdout: '[]' })); - const pm2Processes = safeJSONParse(stdout, []); + // Use custom PM2_HOME if detected from ecosystem config + const pm2Env = result.pm2Home ? 
{ ...process.env, PM2_HOME: result.pm2Home } : process.env; + const pm2Cmd = 'pm2 jlist'; + const { stdout } = await execAsync(pm2Cmd, { env: pm2Env }).catch(() => ({ stdout: '[]' })); + // pm2 jlist may output ANSI codes and warnings before JSON + let jsonStart = stdout.indexOf('[{'); + if (jsonStart < 0) { + const emptyMatch = stdout.match(/\[\](?![0-9])/); + jsonStart = emptyMatch ? stdout.indexOf(emptyMatch[0]) : -1; + } + const pm2Json = jsonStart >= 0 ? stdout.slice(jsonStart) : '[]'; + const pm2Processes = safeJSONParse(pm2Json, []); // Look for processes that might be this app const possibleNames = [ diff --git a/server/services/taskSchedule.js b/server/services/taskSchedule.js index bf6a9445..3eefdc49 100644 --- a/server/services/taskSchedule.js +++ b/server/services/taskSchedule.js @@ -1410,3 +1410,122 @@ Repository: {repoPath} Perform ${taskType} analysis on {appName}. Analyze the codebase and make improvements. Commit changes with clear descriptions.`; } + +/** + * Get upcoming tasks preview - what tasks will run next + * Returns a list of tasks sorted by when they'll be eligible to run + * @param {number} limit - Maximum number of upcoming tasks to return + * @returns {Array} Upcoming tasks with timing info + */ +export async function getUpcomingTasks(limit = 10) { + const schedule = await loadSchedule(); + const now = Date.now(); + const upcoming = []; + + // Process self-improvement tasks + for (const [taskType, interval] of Object.entries(schedule.selfImprovement)) { + if (!interval.enabled) continue; + if (interval.type === INTERVAL_TYPES.ON_DEMAND) continue; + + const check = await shouldRunSelfImprovementTask(taskType); + const execution = schedule.executions[`self-improve:${taskType}`] || { lastRun: null, count: 0 }; + const lastRun = execution.lastRun ? new Date(execution.lastRun).getTime() : 0; + + // Calculate when task becomes eligible + let eligibleAt = now; + let status = 'ready'; + + if (check.shouldRun) { + eligibleAt = now; + status = 'ready'; + } else if (check.nextRunAt) { + eligibleAt = new Date(check.nextRunAt).getTime(); + status = 'scheduled'; + } else if (interval.type === INTERVAL_TYPES.ONCE && execution.count > 0) { + status = 'completed'; + eligibleAt = Infinity; + } + + if (status === 'completed') continue; + + upcoming.push({ + taskType, + category: 'selfImprovement', + intervalType: interval.type, + status, + eligibleAt, + eligibleIn: eligibleAt - now, + eligibleInFormatted: formatTimeRemaining(eligibleAt - now), + lastRun: execution.lastRun, + lastRunFormatted: execution.lastRun ? formatRelativeTime(new Date(execution.lastRun).getTime()) : 'never', + runCount: execution.count, + successRate: check.successRate ?? 
null, + learningAdjusted: check.learningApplied || false, + adjustmentMultiplier: check.adjustmentMultiplier || 1.0, + description: getTaskTypeDescription(taskType) + }); + } + + // Sort by eligibility time (ready tasks first, then by time until eligible) + upcoming.sort((a, b) => { + if (a.status === 'ready' && b.status !== 'ready') return -1; + if (b.status === 'ready' && a.status !== 'ready') return 1; + return a.eligibleAt - b.eligibleAt; + }); + + return upcoming.slice(0, limit); +} + +/** + * Format time remaining in human-readable form + */ +function formatTimeRemaining(ms) { + if (ms <= 0) return 'now'; + + const minutes = Math.floor(ms / 60000); + const hours = Math.floor(minutes / 60); + const days = Math.floor(hours / 24); + + if (days > 0) return `${days}d ${hours % 24}h`; + if (hours > 0) return `${hours}h ${minutes % 60}m`; + if (minutes > 0) return `${minutes}m`; + return '< 1m'; +} + +/** + * Format relative time (e.g., "2h ago") + */ +function formatRelativeTime(timestamp) { + const now = Date.now(); + const diff = now - timestamp; + + const minutes = Math.floor(diff / 60000); + const hours = Math.floor(minutes / 60); + const days = Math.floor(hours / 24); + + if (days > 0) return `${days}d ago`; + if (hours > 0) return `${hours}h ago`; + if (minutes > 0) return `${minutes}m ago`; + return 'just now'; +} + +/** + * Get human-readable description for task type + */ +function getTaskTypeDescription(taskType) { + const descriptions = { + 'ui-bugs': 'Find and fix UI bugs', + 'mobile-responsive': 'Check mobile responsiveness', + 'security': 'Security vulnerability audit', + 'code-quality': 'Code quality improvements', + 'console-errors': 'Fix console errors', + 'performance': 'Performance optimization', + 'cos-enhancement': 'Enhance CoS capabilities', + 'test-coverage': 'Improve test coverage', + 'documentation': 'Update documentation', + 'feature-ideas': 'Brainstorm and implement features', + 'accessibility': 'Accessibility audit', + 'dependency-updates': 'Update dependencies' + }; + return descriptions[taskType] || taskType.replace(/-/g, ' '); +}