Compare commits

...

93 Commits

Author SHA1 Message Date
a4625a4559 fix: restore CLI-triggered workflows, only remove restart-gateway
The previous commit incorrectly deleted all workflows. Only restart-gateway
should be removed (replaced by direct shell trigger). Other workflows
(solve-issue, extract-knowledge, develop-sense, develop-workflow) are
CLI-triggered and independent of sense coupling.
2026-05-02 13:55:27 +00:00
c71212a0ce refactor: sense triggers shell command directly, remove workflow
- SenseTrigger is now { command: string } — no workflow coupling
- Restart gateway via direct systemctl command instead of workflow
- Remove workflows/ directory and workflow config from nerve.yaml
2026-05-02 13:44:22 +00:00
8186a23ceb chore: remove unused schema and migrations 2026-05-02 09:38:22 +00:00
29d47bd9c4 feat: add restart-gateway workflow, remove unused senses
- Remove 4 data-only senses (linux-system-health, worker-process-metrics,
  hermes-session-message-stats, git-workspace-status) — none triggered workflows
- Refactor hermes-gateway-health sense: add state tracking, trigger
  restart-gateway workflow after 3 consecutive failures (with 5min cooldown)
- Add restart-gateway workflow: restarter role (systemctl restart) +
  verifier role (check service came back)
- Simplify nerve.yaml to single sense + single workflow
2026-05-02 05:38:44 +00:00
436ccf12b3 refactor(solve-issue): flatten role folders to single files
Also clean up leftover knowledge-extraction folder (renamed to extract-knowledge in dc1e96d).

Refs uncaged/nerve#284
2026-04-30 13:05:41 +00:00
2f78c72e4e refactor(solve-issue): flatten role folders to single files
Refs uncaged/nerve#284
2026-04-30 13:04:36 +00:00
dc1e96d8f3 refactor(workflows): rename knowledge-extraction to extract-knowledge
Align WorkflowDefinition name, nerve.yaml, role prompts, and lockfile path with extract-knowledge.

Refs #285

Made-with: Cursor
2026-04-30 13:04:29 +00:00
7432f80d61 refactor(knowledge-extraction): convert questioner and answerer to createRole four-tuple
- questioner: createRole(adapter, questionerPrompt, schema, extract) + queue short-circuit + meta post-processing
- answerer: createRole(adapter, answererPrompt, schema, extract) + empty-questions short-circuit
- build.ts: use createLlmAdapter(extract.provider) as default LLM adapter for questioner/answerer

Refs uncaged/nerve#277
2026-04-30 12:38:58 +00:00
1da41c7f08 chore: remove stale sense index.js from source and tracking
小橘 <xiaoju@shazhou.work>
2026-04-30 09:16:13 +00:00
07be0d3dfa refactor: move all build output to dist/
- senses build to dist/senses/<name>/index.js
- workflows build to dist/workflows/<name>/index.js
- scripts/build.mjs: clean dist/ before build, output to dist/
- .gitignore: simplified to just dist/

小橘 <xiaoju@shazhou.work>
2026-04-30 09:16:04 +00:00
0fdd2d26cc chore: remove build artifacts from git tracking
These are esbuild outputs, now covered by .gitignore.

小橘 <xiaoju@shazhou.work>
2026-04-30 09:11:16 +00:00
cf7e288874 chore: consolidate .gitignore into root
Remove per-workflow .gitignore files, add build output patterns to root.

小橘 <xiaoju@shazhou.work>
2026-04-30 09:06:22 +00:00
f7cf1a1cb2 refactor: single-package workspace with root esbuild build
Merge workflow and sense devDependencies into root, remove per-package package.json and workflow tsconfigs, add scripts/build.mjs for consistent outputs.

Fixes #22
2026-04-30 09:03:05 +00:00
e4fd5d6ba4 refactor: migrate all workflows to RFC-005 ThreadContext signatures
- Role: (start, messages) → (ctx: ThreadContext)
- AgentFn prompt callbacks: (start) → (ctx)
- ModeratorContext → ThreadContext
- 13 files updated across knowledge-extraction and solve-issue workflows

小橘 <xiaoju@shazhou.work>
2026-04-30 08:39:52 +00:00
1c512435de feat: add knowledge-extraction BFS workflow
Three-role workflow (questioner → answerer → explorer) that iterates
over .knowledge/ cards to discover and fill knowledge gaps via BFS.

- questioner: createLlmRole, reads card, asks 3 technical questions
- answerer: spawnSafe nerve knowledge query, judges answers
- explorer: reads code, writes/patches .knowledge cards, runs sync
- moderator: BFS queue from message history, stagnation rule

Closes #266
2026-04-30 02:27:10 +00:00
8774d71d57 feat: update senses to return ComputeResult<T>
Wrap compute return values in { signal, workflow: null }
to match new SenseComputeFn contract.

— 小橘 🍊(NEKO Team)
2026-04-30 00:38:34 +00:00
1d9e574c94 fix: remove unused AbortSignal param from sense compute
— 小橘 🍊(NEKO Team)
2026-04-30 00:23:12 +00:00
252162ea8e refactor: pure sense compute — return data instead of db.insert
All 5 senses updated to new API:
- compute(signal: AbortSignal) => Promise<T | null>
- Export table for runtime-side insert
- Remove drizzle-orm/libsql imports

Refs uncaged/nerve#264

— 小橘 🍊(NEKO Team)
2026-04-30 00:15:03 +00:00
60979aaa6a refactor: migrate develop-sense/develop-workflow to @uncaged/nerve-workflow-meta
Delete local roles, moderator, and build files. Workflow index.ts
now imports factory from package and wires adapters/extract/cwd.

Closes #21

— 小橘 🍊(NEKO Team)
2026-04-29 14:52:25 +00:00
b0cff7e0ed refactor: update reviewer calls to use ReviewerConfig object
API changed from (adapter, extract, cwd) to (adapter, extract, { cwd, ... })

— 小橘 🍊(NEKO Team)
2026-04-29 14:32:23 +00:00
b282dfdb7b Merge pull request 'refactor: use @uncaged/nerve-role-reviewer package' (#20) from feat/use-role-reviewer-package into master 2026-04-29 14:27:36 +00:00
bed5ecb56b refactor: use @uncaged/nerve-role-reviewer package
Replace inline reviewer role with import from package.
Both develop-sense and develop-workflow now share the same code.

— 小橘 🍊(NEKO Team)
2026-04-29 14:26:27 +00:00
6a2dbb7335 Merge pull request 'refactor: use @uncaged/nerve-role-committer package' (#19) from feat/use-role-committer-package into master 2026-04-29 14:20:02 +00:00
174df68368 refactor: use @uncaged/nerve-role-committer package, delete _shared/
- develop-sense/develop-workflow committer → re-export from package
- solve-issue committer → uses decorateRole chain (custom prompt stays)
- Delete _shared/workspace-committer.ts and _shared/ directory

RFC-004 Phase 1 complete
2026-04-29 14:18:12 +00:00
59b8f033ba Merge pull request 'refactor: simplify workspace committer — infer from thread' (#18) from refactor/17-simplify-committer into master 2026-04-29 12:53:34 +00:00
0a9da468f7 refactor: simplify workspace committer — agent infers context from thread
Remove nerveRoot, workflowName, conventionalCommitScopeHint, branchCheckoutExample params.
Signature: createWorkspaceCommitterRole(adapter, extract)
Agent reads thread history to decide branch name, scope, and commit message.

Closes #17
2026-04-29 12:52:56 +00:00
ac47daa42b Merge pull request 'refactor: decouple adapters from workflow factories + createXxxRole' (#16) from refactor/15-decouple-adapters into master 2026-04-29 12:44:18 +00:00
a609dc2486 refactor: derive adapter keys from Meta type instead of manual union 2026-04-29 12:41:10 +00:00
eaddd88109 refactor: add defaultAdapter + typed role union, adapters becomes Partial
- Each workflow factory takes defaultAdapter: AgentFn + adapters?: Partial<Record<RoleUnion, AgentFn>>
- index.ts only overrides roles that differ from default (planner/coder use cursor, rest fallback)
- Cleaner call sites, type-safe role names

Refs #15
2026-04-29 12:38:21 +00:00
1683e41b05 refactor: decouple adapters from workflow factories, roles export createXxxRole
- Rename build* → create* workflow factories
- Workflow factories accept adapters: Record<string, AgentFn>
- Each role file exports createXxxRole(adapter, ...) factory
- _shared/workspace-committer accepts adapter as first param
- All adapter imports moved to index.ts (injection point)
- solve-issue roles also updated

Closes #15
2026-04-29 12:35:07 +00:00
a506e5b36b Merge pull request 'refactor: flatten role folders into single .ts files' (#14) from refactor/13-flatten-role-folders into master 2026-04-29 12:23:34 +00:00
42f943c303 refactor: flatten role folders into single .ts files
Each role's index.ts + prompt.ts merged into a single <role>.ts file.
Committer stays as re-export from _shared.
Import paths updated in build.ts and moderator.ts.

Closes #13
2026-04-29 12:21:41 +00:00
215a8f6566 fix(committer): forbid --author in shared workspace committer prompt 2026-04-29 11:15:27 +00:00
f6e29a5cae Merge chore/10-dedup-committer: dedup workspace committer into _shared 2026-04-29 11:15:10 +00:00
f651389ad8 chore(workflows): deduplicate workspace committer role
Extract shared buildWorkspaceCommitterRole into workflows/_shared/workspace-committer.ts
with parameterized conventionalCommitScopeHint and branchCheckoutExample.

Both develop-sense and develop-workflow committer/index.ts now re-export from the
shared module. Duplicate prompt.ts files removed.

Fixes #10
2026-04-29 11:11:51 +00:00
85fac3158d Merge refactor/9-committer-agent-workflow: committer agent + forbid --author 2026-04-29 11:09:12 +00:00
16bea3b8a7 fix(committer): forbid --author flag, use repo git config identity 2026-04-29 11:09:01 +00:00
03146b210a refactor(solve-issue): align committer/publish prompts and docs with agent role workflow
- publish/prompt.ts: require 'Fixes #N' in Ref section to auto-close issues
- CONVENTIONS.md: update Role Patterns table (committer uses createRole hermesAdapter), fix Meta Convention (committed not success)
- committer/prompt.ts: add defaultBranch guard before branch creation to prevent empty PR diffs
- implement/prompt.ts: strengthen git commit prohibition

Refs #9
2026-04-29 11:06:05 +00:00
小橘 🍊(NEKO Team)
c585e0d8a8 Refactor committer into Hermes agent with branch/commit/push workflow
Add solve-issue committer after implement; replace develop-sense and develop-workflow script roles with createRole(hermesAdapter). Implement prompt no longer does git; publish prompt asks for meaningful PR titles.

Refs #9

Made-with: Cursor
2026-04-29 10:44:28 +00:00
3a2b8a49a3 fix: add nerve-daemon back to workspace deps (needed by CLI thread commands)
小橘 🍊(NEKO Team)
2026-04-29 10:11:12 +00:00
aef9943746 fix(solve-issue): prepare supports local repo path from trigger prompt
If the trigger prompt specifies a local repo path (--repo /path or absolute path),
prepare validates it instead of cloning to ~/Code/. Enables running solve-issue
on repos outside the default ~/Code directory.

小橘 🍊(NEKO Team)
2026-04-29 10:09:34 +00:00
小橘 🍊(NEKO Team)
95df8bc3c2 refactor(workflows): use createRole instead of compileWorkflowSpec
Switch build.ts and solve-issue inner roles to @uncaged/nerve-workflow-utils createRole with LlmExtractorConfig. Remove @uncaged/nerve-daemon from workspace dependencies; keep override for linking. Planner uses createCursorAdapter ask mode; dynamic cwd via start.meta.workdir.

Made-with: Cursor
2026-04-29 10:01:02 +00:00
小橘 🍊(NEKO Team)
70fd064bad Refactor workflows to use compileWorkflowSpec from nerve-daemon
Remove workflows/_shared; wire createLlmExtractFn, zodMeta, and createCursorAdapter(mode ask). Plan/implement/publish compile inner specs via daemon.

Made-with: Cursor
2026-04-29 09:35:13 +00:00
56ce22fb1b Migrate workflows to WorkflowSpec-style roles (RFC-003)
Replace createCursorRole/createHermesRole with adapter + prompt + zod meta.

Add shared compileRoleSpec, cursor ask adapter, nerve.yaml extract defaults.

Refs #248

Made-with: Cursor
2026-04-29 09:23:55 +00:00
66ce30cdfb rename: generate-workflow → develop-workflow, generate-sense → develop-sense
小橘 🍊(NEKO Team)
2026-04-29 00:20:01 +00:00
28ac2e9dad chore: change workflow overflow from drop to queue
小橘 🍊(NEKO Team)
2026-04-29 00:13:14 +00:00
86f02da306 refactor: rename workflows to verb phrases, delete gitea-issue-solver
- workflow-generator → generate-workflow
- sense-generator → generate-sense
- Delete gitea-issue-solver (replaced by solve-issue)

小橘 🍊(NEKO Team)
2026-04-28 23:17:26 +00:00
7313111548 chore: remove nerve.db from tracking, add *.db to gitignore
小橘 🍊(NEKO Team)
2026-04-28 23:08:00 +00:00
64a5fc5301 chore(workflow): auto-generated commit 2026-04-28 22:46:19 +00:00
d786827ac8 chore(workflow): auto-generated commit 2026-04-28 22:37:27 +00:00
d6e95f5c65 chore(workflow): auto-generated commit 2026-04-28 22:24:20 +00:00
95587260f6 chore(workflow): auto-generated commit 2026-04-28 16:02:16 +00:00
57c740cdde Revert "chore(workflow): auto-generated commit"
This reverts commit 75f2768a8c7713879bb2ab564f42f24bc609338e.
2026-04-28 15:49:22 +00:00
75f2768a8c chore(workflow): auto-generated commit 2026-04-28 14:46:13 +00:00
3d9f239230 refactor: moderator uses dual limits — max coder rounds (20) + max total rejections (10)
Either limit triggers END. Simple, no per-rejector budgets.
2026-04-28 14:27:40 +00:00
bbcaf1eba5 refactor: moderator uses per-rejector limits instead of shared coderCount
- coder self-iterations (done=false): max 5
- reviewer rejections: max 3
- tester rejections: max 3
- committer rejections: max 2
Each budget is independent, no longer starved by coder's own passes.
2026-04-28 14:22:04 +00:00
fbcc1ff30c feat: add CONVENTIONS.md, reviewer references it instead of hardcoding rules
- CONVENTIONS.md covers: language paradigm, naming, error handling,
  workflow/sense structure, role patterns, meta convention, git rules
- Reviewer prompt now: cat CONVENTIONS.md + check diff against it
- Single source of truth for all roles
2026-04-28 14:19:20 +00:00
76760c4d29 fix: reviewer — remove 'missing files from planner' check, planner designs roles not files 2026-04-28 14:16:23 +00:00
18e201b49c fix: reviewer prompt — reject or approve, no middle ground 2026-04-28 14:13:33 +00:00
daf07b5746 feat: add reviewer role to all three workflows
- workflow-generator, sense-generator, gitea-issue-solver all now have:
  planner → coder → reviewer → tester → committer → END
- Reviewer uses createHermesRole with git diff/status for static analysis
- Checks: garbage files, secrets, debug code, unrelated changes
- Planner prompt now requires Role Behavior sections for every role
- Coder prompt now emphasizes reading initial user prompt for specifics
2026-04-28 13:56:37 +00:00
bd89dcaff6 chore(workflow): auto-generated commit 2026-04-28 13:33:14 +00:00
994de1e7ff chore(workflow): auto-generated commit 2026-04-28 13:20:29 +00:00
e8765abac6 fix: pass nerveRoot to tester prompts for correct path resolution
Hermes agent cwd is not necessarily the nerve workspace root.
Inject nerveRoot into tester prompts so all file paths and commands
use absolute paths to the workspace directory.
2026-04-28 13:03:31 +00:00
ef7d83ad0a chore(sense): auto-generated commit 2026-04-28 13:02:12 +00:00
495d8d1b60 chore(workflow): auto-generated commit 2026-04-28 12:55:08 +00:00
0fab8a68c3 chore: declare pnpm workspace, remove per-package lockfiles and pnpm config
- Add pnpm-workspace.yaml (senses/*, workflows/*)
- Add root build script: pnpm -r build
- Remove pnpm.onlyBuiltDependencies from sense package.json
- Remove pnpm.overrides from workflow package.json
- Remove per-package pnpm-lock.yaml and node_modules
2026-04-28 11:11:41 +00:00
7fb161cf96 fix: tester prompts — build before list, remove non-existent dry-run command
- Both workflow/sense tester: build first, then validate, then list
- workflow-generator: remove nerve workflow dry-run (doesn't exist)
- sense-generator: add build step before sense list/trigger
2026-04-28 11:04:31 +00:00
6778ba5246 refactor: clarify coder done semantics + allow multi-step iterations
- done=true means all files created, build+lint pass
- done=false means progress made, continue next iteration
- Updated both sense-generator and workflow-generator coder prompts
2026-04-28 11:02:25 +00:00
d638623456 refactor(workflow-generator): simplify meta to routing booleans + log-to-file
- planner: { ready }, coder: { done }, tester: { passed }, committer: { success }
- planner/coder: createCursorRole, tester: createHermesRole
- committer: direct spawn, output to .log file
- moderator: coder loop (max 5), committer fail → coder
- bundle 24kb → 8.7kb

Fixes #5
2026-04-28 10:22:57 +00:00
bf77e3452a chore: gitignore dist/ for workflow-generator 2026-04-28 08:50:47 +00:00
a469f30b42 refactor(workflow-generator): multi-file DIP + Role Factory + esbuild bundle
- Split 500-line monolith into roles/{planner,coder,tester,committer}/
- Each role: index.ts (build function) + prompt.ts (pure function)
- Use createCursorRole/createLlmRole/createHermesRole factories
- DIP: env vars read in index.ts, injected via build.ts
- esbuild bundle to dist/index.js (24kb)
- Moderator logic preserved: planner→coder→tester→committer with retries

Fixes xiaoju/nerve-workspace#3
2026-04-28 08:48:23 +00:00
4cf10ad7bf feat: migrate senses to TypeScript source + esbuild bundle
- Move index.js → src/index.ts with proper types for all 4 senses
- Move schema.ts → src/schema.ts
- Add package.json with esbuild build script per sense
- Bundle to index.js at sense root (daemon loads this)
- Update sense-generator coder prompt with TypeScript conventions

Fixes #224
2026-04-28 07:26:53 +00:00
1940ccedd6 refactor: bundle sense-generator to dist/ via esbuild
- Add esbuild build script to package.json
- Add .gitignore for dist/
- Remove stale tsc-generated .js files (now bundled)
- Include sense files generated by workflow run (hermes-gateway-health update, worker-process-metrics)
- Clean up nerve.yaml (remove deleted workflows, migrate reflexes to interval)
2026-04-28 05:31:04 +00:00
2c77f7ffc2 refactor: simplify index.ts — env vars only, drop cfg/spawnSafe 2026-04-28 04:56:52 +00:00
f1720eea5e refactor: infer Meta types from zod schema instead of hand-writing 2026-04-28 04:52:21 +00:00
645f0bacf2 refactor: remove redundant context from prompts, delegate to nerve-dev skill
- Remove nerveYaml injection from planner (skill has it)
- Remove sensesDir/nerveRoot from coder and tester (skill has conventions)
- Prompts now just say 'read the skill' instead of inlining knowledge
- BuildSenseGeneratorDeps reduced to { provider, cwd }
- index.ts drops getNerveYaml(), SENSES_DIR, readFileSync
2026-04-28 04:50:21 +00:00
e460d64786 refactor: inline types.ts into role index.ts, remove separate type files 2026-04-28 04:46:21 +00:00
34e42c5c3e chore: remove test workflows (hello-world, pr-code-reviewer, pr-summarizer) 2026-04-28 04:42:23 +00:00
fc2ca13dc3 refactor: remove buildSenseExamples, use @uncaged/nerve-skills for agent discovery
- Delete buildSenseExamples() (~25 lines of runtime file reading)
- Remove senseExamples from BuildSenseGeneratorDeps and BuildPlannerDeps
- Planner prompt now directs agent to read nerve-dev skill via npm package
- Clean up unused existsSync import

Closes xiaoju/nerve-workspace#2
2026-04-28 04:38:33 +00:00
69eb4ffe49 refactor: use destructuring to simplify build functions 2026-04-28 04:04:49 +00:00
cb61e98979 refactor(sense-generator): full DIP — all deps injected via build functions
Every role is self-contained (types.ts, prompt.ts, index.ts).
No shared.ts, no cross-role imports. All dependencies injected:

  index.ts          — wiring (resolve env, call buildSenseGenerator)
  build.ts          — buildSenseGenerator(deps) → WorkflowDefinition
  moderator.ts      — pure routing, composes meta from role types
  roles/planner/    — buildPlannerRole(deps), self-contained
  roles/coder/      — buildCoderRole(deps), self-contained
  roles/tester/     — buildTesterRole(deps), self-contained

Workflow is now reusable: buildSenseGenerator() can be called with
any provider/paths, not hardcoded to this machine.

小橘 🍊(NEKO Team)
2026-04-28 04:00:38 +00:00
6d3313223f refactor(sense-generator): inject provider from index.ts, roles are now sync
Provider resolved once at top level, injected into each build*Role().
Role builders no longer async — they're pure factory calls.

小橘 🍊(NEKO Team)
2026-04-28 03:56:02 +00:00
21237e19a0 refactor(sense-generator): extract moderator to its own file
小橘 🍊(NEKO Team)
2026-04-28 03:53:49 +00:00
6a2d6b0627 fix(sense-generator): derive attempt count from steps, not llmExtract
attempt is a deterministic fact (count of tester steps), not something
an LLM should guess. Removed from tester meta schema, moderator counts
it directly from context.steps.

小橘 🍊(NEKO Team)
2026-04-28 03:51:42 +00:00
7c4883ddec refactor(sense-generator): tester → createHermesRole for e2e verification
Replaced 122-line hand-written smoke test with createHermesRole.
Hermes runs the full lifecycle check autonomously via terminal:
files → nerve.yaml → sense list → trigger → query → logs on failure.

All 3 roles now use factories:
  planner: createCursorRole (ask mode)
  coder:   createCursorRole (default mode)
  tester:  createHermesRole (e2e verification)

小橘 🍊(NEKO Team)
2026-04-28 03:48:26 +00:00
bc4ac8a5cc refactor(sense-generator): prompt.ts instead of prompt.md + mustache
Static imports, no runtime file reads, bundler friendly.
Removed mustache dependency.

小橘 🍊(NEKO Team)
2026-04-28 03:38:26 +00:00
a811660a33 refactor(sense-generator): extract prompts to prompt.md templates
Each role's prompt is now a separate markdown file with {{mustache}} placeholders,
loaded at module init and interpolated at runtime.

小橘 🍊(NEKO Team)
2026-04-28 03:32:51 +00:00
516a28533a refactor(sense-generator): extract meta schemas to types.ts
小橘 🍊(NEKO Team)
2026-04-28 02:38:07 +00:00
fd3a8c64f2 refactor(sense-generator): remove unnecessary buildWorkflow wrapper
小橘 🍊(NEKO Team)
2026-04-28 02:32:55 +00:00
2d63639ed1 refactor(sense-generator): split roles into separate directories
Following nerve-dev best practice: each role gets its own directory.

Structure:
  index.ts                    — 31 lines (WorkflowDefinition + moderator)
  roles/planner/index.ts      — 48 lines (createCursorRole)
  roles/coder/index.ts        — 33 lines (createCursorRole)
  roles/tester/index.ts       — 122 lines (hand-written smoke test)
  roles/shared.ts             — 63 lines (providers, helpers)
  roles/types.ts              — 5 lines (SenseMeta)

Was: single 416-line index.ts

Refs uncaged/nerve#210
小橘 🍊(NEKO Team)
2026-04-28 02:30:12 +00:00
e05c71d6b0 refactor(sense-generator): use createCursorRole factory, slim meta to routing-only
- planner/coder: replaced 80+ lines hand-written agent calls with createCursorRole()
- SenseMeta slimmed to routing signals only (senseName, filesCreated, passed/attempt)
- Roles read context from thread via nerve thread <id>, not from previous role's meta
- tester stays hand-written (pure CLI logic)
- Re-exported spawnSafe from workflow-utils for helper use

Refs uncaged/nerve#210

小橘 🍊(NEKO Team)
2026-04-28 02:22:38 +00:00
8ff6003a75 refactor(sense-generator): use createCursorRole and slim SenseMeta
Replace hand-written planner and coder with createCursorRole from nerve-workflow-utils. Prompts instruct reading the Nerve thread via nerve thread show. Extract uses resolveDashScopeProvider. SenseMeta keeps routing-only fields; tester remains hand-written with filesystem and smoke checks.

Made-with: Cursor
2026-04-28 02:20:01 +00:00
c5ea790447 refactor(workflow-generator): simplify roles — merge analyst+architect→planner, add coder⇄tester loop, hermes committer
Refs #143

小橘 <xiaoju@shazhou.work>
2026-04-25 10:31:33 +00:00
60 changed files with 2094 additions and 4539 deletions

2
.gitignore vendored
View File

@@ -4,3 +4,5 @@ logs/
nerve.pid
nerve.sock
false/
*.db
dist/

154
CONVENTIONS.md Normal file
View File

@@ -0,0 +1,154 @@
# Nerve Workspace Conventions
This document defines coding and workflow conventions for the nerve-workspace (`~/.uncaged-nerve`).
All roles (planner, coder, reviewer, tester) should reference this file.
## Language & Paradigm
### Functional-first
Use `function` + `type`, not `class` + `interface`.
```typescript
// ✅ Good
type Signal = { senseId: string; value: unknown; ts: number };
function createSignal(senseId: string, value: unknown): Signal { ... }
// ❌ Bad
class Signal implements ISignal { ... }
```
### Rules
| Rule | Description |
|------|-------------|
| `type` over `interface` | All type definitions use `type` |
| `function` over `class` | Pure functions + closures, no class |
| No `this` | Functions must not depend on `this` context |
| No inheritance | No `extends`, `implements`, `abstract` |
| Composition over inheritance | Use function composition |
| No optional properties | Use `T \| null` instead of `?:` |
| No dynamic `import()` | Always static top-level `import` |
| `async/await` only | Never `.then()` chains |
### Exceptions
Classes allowed when required by a library (e.g. Drizzle `sqliteTable`) or Error subclasses.
## Naming
| Type | Style | Example |
|------|-------|---------|
| Files | kebab-case | `signal-bus.ts` |
| Types | PascalCase | `SignalBus` |
| Functions/variables | camelCase | `createSignalBus` |
| Constants | UPPER_SNAKE | `MAX_RETRY_COUNT` |
## Error Handling
- Use `Result<T, E>` for expected failures
- `throw` only for unrecoverable bugs
- No try-catch for flow control
```typescript
type Result<T, E = Error> = { ok: true; value: T } | { ok: false; error: E };
```
## Workflow Structure
Each workflow follows the multi-file pattern:
```
workflows/<name>/
index.ts — WorkflowDefinition default export (thin entry point)
build.ts — factory function with dependency injection
moderator.ts — moderator function + WorkflowMeta type
roles/
<role>/
index.ts — build function + meta schema
prompt.ts — prompt pure function (string template)
package.json — with esbuild build script
tsconfig.json
```
### Role Implementation Patterns
| Pattern | When to use | Example |
|---------|-------------|---------|
| `createCursorRole` | Needs file system access (code generation, planning) | planner, coder |
| `createHermesRole` | Needs shell + tools (testing, reviewing) | tester, reviewer |
| `createLlmRole` | Pure LLM reasoning, no tools | analysis roles |
| `createRole(hermesAdapter, …)` | Agent role with LLM + shell (branch/commit/push from thread context) | solve-issue committer, publish |
| Direct `Role<Meta>` | No LLM needed, scripted logic | thin wrappers only |
### Meta Convention
Meta is a **routing signal only** — one boolean per role:
- `{ ready: boolean }` — planner
- `{ done: boolean }` — coder
- `{ approved: boolean }` — reviewer
- `{ passed: boolean }` — tester
- `{ committed: boolean }` — committer (solve-issue: branch created, pushed)
- `{ success: boolean }` — publish (PR opened)
### Standard Flow
```
planner → coder → reviewer → tester → committer → END
```
- Reviewer rejection → back to coder (within MAX_CODER_ITERATIONS)
- Tester failure → back to coder (within MAX_CODER_ITERATIONS)
- Committer failure → back to coder (within MAX_CODER_ITERATIONS)
## Sense Structure
```
senses/<name>/
src/
index.ts — compute() function + schema
schema.ts — Drizzle table definition
migrations/ — SQLite migrations
package.json — with esbuild build script
```
## Toolchain
| Tool | Purpose |
|------|---------|
| **pnpm** | Package manager (workspace mode) |
| **TypeScript** | Type checking |
| **esbuild** | Bundling (each workflow/sense bundles independently) |
### Commands
```bash
pnpm build # build all packages
pnpm -r build # same, explicit recursive
cd workflows/<name> && pnpm build # build one workflow
```
## Git & Commit Convention
```
<type>(<scope>): <description>
type: feat | fix | refactor | docs | chore | test
scope: workflow | sense | core | ...
```
### What NOT to commit
- `node_modules/`
- `dist/` (build outputs, generated by esbuild)
- `.DS_Store`
- pnpm cache artifacts (e.g. `false/` directories from `--no-cache` misuse)
- Secrets, API keys, tokens
- Unrelated file changes outside the task scope
## Dependencies
Shared packages from the nerve monorepo:
- `@uncaged/nerve-core` — types, END constant, WorkflowDefinition
- `@uncaged/nerve-workflow-utils` — role factories, spawnSafe, llmExtract, cursorAgent
- `zod` — schema definitions for meta extraction

View File

View File

@@ -1,45 +1,26 @@
# nerve.yaml — Nerve workspace configuration
extract:
provider: dashscope
model: qwen-plus
senses:
linux-system-health:
group: system
throttle: 10s
timeout: 15s
grace_period: null
hermes-gateway-health:
group: system
interval: 2m
throttle: 30s
timeout: 30s
grace_period: null
hermes-session-message-stats:
group: hermes
throttle: 30s
timeout: 60s
grace_period: null
workflows:
sense-generator:
develop-sense:
concurrency: 1
overflow: drop
workflow-generator:
overflow: queue
develop-workflow:
concurrency: 1
overflow: drop
pr-summarizer:
overflow: queue
solve-issue:
concurrency: 1
overflow: drop
pr-code-reviewer:
overflow: queue
extract-knowledge:
concurrency: 1
overflow: drop
hello-world:
concurrency: 1
overflow: drop
reflexes:
- kind: sense
sense: linux-system-health
interval: 30s
- kind: sense
sense: hermes-gateway-health
interval: 2m
- kind: sense
sense: hermes-session-message-stats
interval: 15m
overflow: queue

View File

@@ -3,24 +3,39 @@
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "node scripts/build.mjs"
},
"dependencies": {
"@uncaged/nerve-adapter-cursor": "link:../repos/nerve/packages/adapter-cursor",
"@uncaged/nerve-adapter-hermes": "link:../repos/nerve/packages/adapter-hermes",
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-daemon": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"@uncaged/nerve-daemon": "link:../repos/nerve/packages/daemon",
"@uncaged/nerve-role-committer": "link:../repos/nerve/packages/role-committer",
"@uncaged/nerve-role-reviewer": "link:../repos/nerve/packages/role-reviewer",
"@uncaged/nerve-workflow-meta": "link:../repos/nerve/packages/workflow-meta",
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils",
"drizzle-orm": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"drizzle-kit": "latest"
"@types/node": "^22.0.0",
"drizzle-kit": "latest",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
},
"pnpm": {
"onlyBuiltDependencies": [
"esbuild"
],
"overrides": {
"@uncaged/nerve-adapter-cursor": "link:../repos/nerve/packages/adapter-cursor",
"@uncaged/nerve-adapter-hermes": "link:../repos/nerve/packages/adapter-hermes",
"@uncaged/nerve-daemon": "link:../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils"
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils",
"@uncaged/nerve-role-committer": "link:../repos/nerve/packages/role-committer",
"@uncaged/nerve-workflow-meta": "link:../repos/nerve/packages/workflow-meta"
}
}
}

225
pnpm-lock.yaml generated
View File

@@ -5,20 +5,39 @@ settings:
excludeLinksFromLockfile: false
overrides:
'@uncaged/nerve-adapter-cursor': link:../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes': link:../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-daemon': link:../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../repos/nerve/packages/workflow-utils
'@uncaged/nerve-role-committer': link:../repos/nerve/packages/role-committer
'@uncaged/nerve-workflow-meta': link:../repos/nerve/packages/workflow-meta
importers:
.:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../repos/nerve/packages/adapter-cursor
version: link:../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes':
specifier: link:../repos/nerve/packages/adapter-hermes
version: link:../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-core':
specifier: link:../repos/nerve/packages/core
version: link:../repos/nerve/packages/core
'@uncaged/nerve-daemon':
specifier: link:../repos/nerve/packages/daemon
version: link:../repos/nerve/packages/daemon
'@uncaged/nerve-role-committer':
specifier: link:../repos/nerve/packages/role-committer
version: link:../repos/nerve/packages/role-committer
'@uncaged/nerve-role-reviewer':
specifier: link:../repos/nerve/packages/role-reviewer
version: link:../repos/nerve/packages/role-reviewer
'@uncaged/nerve-workflow-meta':
specifier: link:../repos/nerve/packages/workflow-meta
version: link:../repos/nerve/packages/workflow-meta
'@uncaged/nerve-workflow-utils':
specifier: link:../repos/nerve/packages/workflow-utils
version: link:../repos/nerve/packages/workflow-utils
@ -29,9 +48,196 @@ importers:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
drizzle-kit:
specifier: latest
version: 0.31.10
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
senses/git-workspace-status:
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
senses/hermes-gateway-health:
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
senses/hermes-session-message-stats:
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
senses/linux-system-health:
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
senses/worker-process-metrics:
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
workflows/develop-sense:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../../../repos/nerve/packages/adapter-cursor
version: link:../../../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes':
specifier: link:../../../repos/nerve/packages/adapter-hermes
version: link:../../../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-meta':
specifier: link:../../../repos/nerve/packages/workflow-meta
version: link:../../../repos/nerve/packages/workflow-meta
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
workflows/develop-workflow:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../../../repos/nerve/packages/adapter-cursor
version: link:../../../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes':
specifier: link:../../../repos/nerve/packages/adapter-hermes
version: link:../../../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-meta':
specifier: link:../../../repos/nerve/packages/workflow-meta
version: link:../../../repos/nerve/packages/workflow-meta
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
workflows/extract-knowledge:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../../../repos/nerve/packages/adapter-cursor
version: link:../../../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes':
specifier: link:../../../repos/nerve/packages/adapter-hermes
version: link:../../../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
workflows/solve-issue:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../../../repos/nerve/packages/adapter-cursor
version: link:../../../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes':
specifier: link:../../../repos/nerve/packages/adapter-hermes
version: link:../../../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
packages:
@ -490,6 +696,9 @@ packages:
cpu: [x64]
os: [win32]
'@types/node@22.19.17':
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
@ -750,6 +959,14 @@ packages:
tunnel-agent@0.6.0:
resolution: {integrity: sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==}
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
@ -995,6 +1212,10 @@ snapshots:
'@esbuild/win32-x64@0.27.7':
optional: true
'@types/node@22.19.17':
dependencies:
undici-types: 6.21.0
base64-js@1.5.1:
optional: true
@ -1286,6 +1507,10 @@ snapshots:
safe-buffer: 5.2.1
optional: true
typescript@5.9.3: {}
undici-types@6.21.0: {}
util-deprecate@1.0.2:
optional: true

46
scripts/build.mjs Normal file
View File

@ -0,0 +1,46 @@
import * as esbuild from "esbuild";
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
// Repository root = parent of scripts/ (this file's directory).
const root = path.join(path.dirname(fileURLToPath(import.meta.url)), "..");
// All bundled output lands under dist/.
const dist = path.join(root, "dist");
// Shared esbuild options: ESM bundles for node with bare-specifier imports
// left external (resolved from node_modules at runtime, not inlined).
const opts = {
  bundle: true,
  platform: "node",
  format: "esm",
  packages: "external",
};
/**
 * List the immediate subdirectories of `dir`, skipping hidden (".") and
 * private ("_") entries. Returns [] when `dir` does not exist.
 *
 * @param {string} dir directory to scan
 * @returns {{ name: string, full: string }[]} name + absolute path pairs
 */
function listDirs(dir) {
  if (!fs.existsSync(dir)) return [];
  return fs
    .readdirSync(dir)
    .filter((name) => !name.startsWith(".") && !name.startsWith("_"))
    .map((name) => ({ name, full: path.join(dir, name) }))
    .filter(({ full }) => {
      // statSync follows symlinks (so a symlinked package dir still counts),
      // but it throws on dangling links — treat those as "not a directory"
      // instead of crashing the whole build.
      try {
        return fs.statSync(full).isDirectory();
      } catch {
        return false;
      }
    });
}
/**
 * Bundle every sense and workflow entry point into dist/.
 * Senses build from <dir>/src/index.ts, workflows from <dir>/index.ts;
 * directories without an entry file are skipped silently.
 */
async function main() {
  // Start from a clean slate so removed senses/workflows disappear from dist/.
  fs.rmSync(dist, { recursive: true, force: true });

  /** Bundle one entry file to dist/<kind>/<name>/index.js. */
  const bundle = async (kind, name, entry) => {
    const outfile = path.join(dist, kind, name, "index.js");
    fs.mkdirSync(path.dirname(outfile), { recursive: true });
    await esbuild.build({ ...opts, entryPoints: [entry], outfile });
  };

  for (const { name, full } of listDirs(path.join(root, "senses"))) {
    const entry = path.join(full, "src", "index.ts");
    if (fs.existsSync(entry)) await bundle("senses", name, entry);
  }
  for (const { name, full } of listDirs(path.join(root, "workflows"))) {
    const entry = path.join(full, "index.ts");
    if (fs.existsSync(entry)) await bundle("workflows", name, entry);
  }
}
await main();

View File

@ -1,14 +0,0 @@
-- Migration: 0001_init
-- Creates the hermes_gateway_health table for hermes-gateway-health sense.
-- One row is inserted per probe tick by the sense's compute() step.
CREATE TABLE IF NOT EXISTS hermes_gateway_health (
  id INTEGER PRIMARY KEY AUTOINCREMENT,   -- surrogate row id
  ts INTEGER NOT NULL,                    -- probe timestamp (epoch ms, Date.now())
  alive INTEGER NOT NULL,                 -- 1 when the gateway process was up, else 0
  main_pid INTEGER NOT NULL,              -- systemd MainPID (0 when unknown)
  rss_bytes INTEGER NOT NULL,             -- resident set size of the main process (from ps)
  cpu_percent REAL NOT NULL,              -- %CPU as reported by ps, two decimal places
  uptime_sec INTEGER NOT NULL,            -- process uptime in seconds
  active_sessions INTEGER NOT NULL,       -- session count reported by `hermes sessions stats`
  child_process_count INTEGER NOT NULL    -- direct children of main_pid
);

View File

@ -1,13 +0,0 @@
import { integer, real, sqliteTable } from "drizzle-orm/sqlite-core";

/** Drizzle mapping for hermes_gateway_health: one row per probe tick. */
export const hermesGatewayHealth = sqliteTable("hermes_gateway_health", {
  id: integer("id").primaryKey({ autoIncrement: true }),
  ts: integer("ts").notNull(), // probe timestamp (epoch ms)
  alive: integer("alive").notNull(), // 1 = gateway process up, 0 = down
  mainPid: integer("main_pid").notNull(), // systemd MainPID (0 when unknown)
  rssBytes: integer("rss_bytes").notNull(), // resident set size from ps
  cpuPercent: real("cpu_percent").notNull(), // %CPU from ps
  uptimeSec: integer("uptime_sec").notNull(), // process uptime (seconds)
  activeSessions: integer("active_sessions").notNull(), // sessions reported by hermes
  childProcessCount: integer("child_process_count").notNull(), // direct children of mainPid
});

View File

@ -1,14 +1,90 @@
import { execFile } from "node:child_process";
import { hermesGatewayHealth } from "./schema.ts";
/** Keep subprocess deadlines slightly under typical sense timeout (30s). */
const EXEC_TIMEOUT_MS = 25_000;
/**
* When `ps` lacks `etimes` (wall-clock seconds since start), parse `etime`
* ([[dd-]hh:]mm:ss) into seconds. See ps(1) `etime` field description.
*/
function etimeToSeconds(etime) {
/** HTTP probe stays below EXEC_TIMEOUT_MS and sense timeout (30s). */
const HTTP_TIMEOUT_MS = Math.min(23_000, EXEC_TIMEOUT_MS - 2000);
const HTTP_ERROR_MAX_LEN = 256;
/** How many consecutive failures before triggering a restart. */
const FAILURE_THRESHOLD = 3;
/** State carried between compute() runs of this sense. */
type SenseState = {
  /** Unhealthy ticks in a row; reset to 0 on any healthy probe. */
  consecutiveFailures: number;
  /** Epoch ms of the last restart this sense triggered (0 = never). */
  lastRestartTs: number;
  /** Minimum ms between restart attempts to avoid restart loops. */
  restartCooldownMs: number;
};
/** State for the very first run, before any probe has happened. */
export const initialState: SenseState = {
  consecutiveFailures: 0,
  lastRestartTs: 0,
  restartCooldownMs: 300_000, // 5 minutes
};
/**
 * Resolve the gateway health-probe URL from the environment.
 * HERMES_GATEWAY_HEALTH_URL wins over NERVE_HERMES_GATEWAY_URL;
 * returns "" (meaning "no URL configured") when neither is set.
 */
function gatewayProbeUrl(): string {
  for (const key of ["HERMES_GATEWAY_HEALTH_URL", "NERVE_HERMES_GATEWAY_URL"]) {
    const value = process.env[key];
    if (value != null) return String(value).trim();
  }
  return "";
}
/**
 * Render an unknown error as a short string for storage.
 * Prefers a truthy `code` property (e.g. ECONNREFUSED), then `message`,
 * then the value itself; falls back to "error" and caps the length at
 * HTTP_ERROR_MAX_LEN characters.
 */
function truncateHttpError(err: unknown): string {
  let raw: string;
  if (err && typeof err === "object" && "code" in err && (err as { code: unknown }).code) {
    raw = String((err as { code: unknown }).code);
  } else {
    raw = String((err as { message?: unknown } | null)?.message ?? err ?? "error");
  }
  const trimmed = raw.trim();
  const label = trimmed === "" ? "error" : trimmed;
  // slice() is a no-op when the string is already short enough.
  return label.slice(0, HTTP_ERROR_MAX_LEN);
}
/** Outcome of one HTTP health-probe attempt. */
type HttpProbeResult = {
  httpOk: number;
  httpStatusCode: number;
  httpLatencyMs: number;
  httpError: string;
};

/**
 * GET the health URL once. Never throws: a missing URL, non-2xx/3xx status,
 * timeout, or network failure is encoded into the result fields instead.
 */
async function probeGatewayHttp(url: string): Promise<HttpProbeResult> {
  if (!url) {
    return { httpOk: 0, httpStatusCode: 0, httpLatencyMs: 0, httpError: "missing_url" };
  }
  const startedAt = Date.now();
  try {
    const res = await fetch(url, {
      method: "GET",
      signal: AbortSignal.timeout(HTTP_TIMEOUT_MS),
      redirect: "follow",
    });
    const latency = Date.now() - startedAt;
    const status = res.status;
    // 2xx and 3xx count as healthy (redirects are followed anyway).
    if (status >= 200 && status < 400) {
      return { httpOk: 1, httpStatusCode: status, httpLatencyMs: latency, httpError: "" };
    }
    return {
      httpOk: 0,
      httpStatusCode: status,
      httpLatencyMs: latency,
      httpError: truncateHttpError({ message: `HTTP ${status}` }),
    };
  } catch (err) {
    return {
      httpOk: 0,
      httpStatusCode: 0,
      httpLatencyMs: Date.now() - startedAt,
      httpError: truncateHttpError(err),
    };
  }
}
function etimeToSeconds(etime: string): number {
let s = String(etime).trim();
if (!s) return 0;
let days = 0;
@ -32,7 +108,14 @@ function etimeToSeconds(etime) {
return 0;
}
function execFileUtf8(file, args, opts = {}) {
type ExecResult = {
exitCode: number;
errCode: string | undefined;
stdout: string;
stderr: string;
};
function execFileUtf8(file: string, args: string[], opts: Record<string, unknown> = {}): Promise<ExecResult> {
return new Promise((resolve) => {
execFile(
file,
@ -42,13 +125,15 @@ function execFileUtf8(file, args, opts = {}) {
maxBuffer: 8 * 1024 * 1024,
timeout: EXEC_TIMEOUT_MS,
...opts,
},
} as Parameters<typeof execFile>[2],
(err, stdout, stderr) => {
const exitCode =
err && typeof err.status === "number" ? err.status : err ? -1 : 0;
err && typeof (err as NodeJS.ErrnoException).status === "number"
? (err as NodeJS.ErrnoException & { status: number }).status
: err ? -1 : 0;
resolve({
exitCode,
errCode: err?.code,
errCode: (err as NodeJS.ErrnoException | null)?.code,
stdout: String(stdout ?? ""),
stderr: String(stderr ?? ""),
});
@ -57,12 +142,12 @@ function execFileUtf8(file, args, opts = {}) {
});
}
function parseMainPidFromStatus(text) {
/** Extract "Main PID: <n>" from `systemctl status` output; 0 when absent. */
function parseMainPidFromStatus(text: string): number {
  const match = /Main PID:\s*(\d+)/i.exec(text);
  if (!match) return 0;
  return Math.trunc(Number.parseInt(match[1], 10)) || 0;
}
function parseActiveLineFromStatus(text) {
function parseActiveLineFromStatus(text: string): { active: boolean; subRunning: boolean } {
for (const line of text.split("\n")) {
if (/^\s*Active:/i.test(line)) {
const m = line.match(/Active:\s*(\S+)\s*\(([^)]*)\)/i);
@ -77,7 +162,7 @@ function parseActiveLineFromStatus(text) {
return { active: false, subRunning: false };
}
function parseSystemctlShow(text) {
function parseSystemctlShow(text: string): { mainPid: number; active: boolean; subRunning: boolean } {
let mainPid = 0;
let active = false;
let subRunning = false;
@ -94,7 +179,7 @@ function parseSystemctlShow(text) {
return { mainPid, active, subRunning };
}
async function readSystemdState() {
async function readSystemdState(): Promise<{ mainPid: number; systemdActiveRunning: boolean }> {
const status = await execFileUtf8("systemctl", [
"--user",
"--no-pager",
@ -105,8 +190,7 @@ async function readSystemdState() {
let mainPid = parseMainPidFromStatus(combined);
let { active, subRunning } = parseActiveLineFromStatus(combined);
const needShow =
mainPid <= 0 || !active || !subRunning;
const needShow = mainPid <= 0 || !active || !subRunning;
if (needShow) {
const show = await execFileUtf8("systemctl", [
@ -131,14 +215,20 @@ async function readSystemdState() {
return { mainPid, systemdActiveRunning: active && subRunning };
}
async function processExists(mainPid) {
/**
 * True when `ps -p <pid>` reports the PID. Non-positive PIDs and a missing
 * `ps` binary resolve to false (execFileUtf8 never rejects).
 */
async function processExists(mainPid: number): Promise<boolean> {
  if (mainPid <= 0) return false;
  const result = await execFileUtf8("ps", ["-p", String(mainPid), "-o", "pid="]);
  if (result.errCode === "ENOENT") return false;
  return result.stdout.trim().length > 0;
}
async function readPsMetrics(mainPid) {
type PsMetrics = {
rssBytes: number;
cpuPercent: number;
uptimeSec: number;
};
async function readPsMetrics(mainPid: number): Promise<PsMetrics> {
if (mainPid <= 0) {
return { rssBytes: 0, cpuPercent: 0, uptimeSec: 0 };
}
@ -168,12 +258,8 @@ async function readPsMetrics(mainPid) {
const rssKiB = Number(parts[0]);
const cpu = Number(parts[1]);
const uptimeSec = etimeToSeconds(parts.slice(2).join(" "));
const rssBytes = Number.isFinite(rssKiB)
? Math.trunc(rssKiB * 1024)
: 0;
const cpuPercent = Number.isFinite(cpu)
? Math.round(cpu * 100) / 100
: 0;
const rssBytes = Number.isFinite(rssKiB) ? Math.trunc(rssKiB * 1024) : 0;
const cpuPercent = Number.isFinite(cpu) ? Math.round(cpu * 100) / 100 : 0;
return { rssBytes, cpuPercent, uptimeSec };
}
const rssKiB = Number(parts[0]);
@ -181,67 +267,16 @@ async function readPsMetrics(mainPid) {
const etimes = Number(parts[2]);
const rssBytes = Number.isFinite(rssKiB) ? Math.trunc(rssKiB * 1024) : 0;
const cpuPercent = Number.isFinite(cpu) ? Math.round(cpu * 100) / 100 : 0;
const uptimeSec = Number.isFinite(etimes)
? Math.trunc(etimes)
: 0;
const uptimeSec = Number.isFinite(etimes) ? Math.trunc(etimes) : 0;
return { rssBytes, cpuPercent, uptimeSec };
}
/**
 * Extract a session count from `hermes sessions stats` output.
 * Looks for a line-anchored "Active session(s): N" first, then
 * "Total session(s): N" (both case-insensitive). Returns 0 when absent.
 *
 * @param {string} text combined stdout/stderr of the hermes CLI
 * @returns {number} parsed count, or 0
 */
function parseActiveSessionsFromHermesStats(text) {
  const src = String(text);
  // The /i flag already makes the first pattern match "active sessions:"
  // lines, so the former duplicate lowercase pattern was dead code.
  const patterns = [
    /^\s*Active\s+sessions?:\s*(\d+)/gim,
    /^\s*Total\s+sessions?:\s*(\d+)/gim,
  ];
  for (const re of patterns) {
    re.lastIndex = 0; // /g regexes are stateful; always scan from the start
    const m = re.exec(src);
    if (m) {
      const n = Math.trunc(Number.parseInt(m[1], 10));
      return Number.isFinite(n) ? n : 0;
    }
  }
  return 0;
}
/**
 * Query `hermes sessions stats` for the active-session count.
 * Returns 0 when the hermes binary is missing or output is unparseable.
 */
async function readActiveSessions() {
  try {
    const result = await execFileUtf8("hermes", ["sessions", "stats"]);
    if (result.errCode === "ENOENT") return 0;
    return parseActiveSessionsFromHermesStats(`${result.stdout}\n${result.stderr}`);
  } catch {
    return 0;
  }
}
/**
 * Count the direct child processes of `mainPid` via `ps --ppid`.
 * Returns 0 for non-positive PIDs, a missing `ps` binary, or any failure.
 */
async function countDirectChildren(mainPid) {
  if (mainPid <= 0) return 0;
  try {
    const result = await execFileUtf8("ps", [
      "--no-headers",
      "-o",
      "pid",
      "--ppid",
      String(mainPid),
    ]);
    if (result.errCode === "ENOENT") return 0;
    let count = 0;
    for (const line of result.stdout.split("\n")) {
      if (line.trim()) count++;
    }
    return count;
  } catch {
    return 0;
  }
}
export async function compute(db, _peers) {
const ts = Date.now();
export async function compute(prevState: SenseState) {
const now = Date.now();
// --- probe gateway ---
let mainPid = 0;
let systemdActiveRunning = false;
try {
const st = await readSystemdState();
mainPid = st.mainPid;
@ -274,48 +309,61 @@ export async function compute(db, _peers) {
}
}
const alive =
systemdActiveRunning && mainPid > 0 && psOk ? 1 : 0;
const alive = systemdActiveRunning && mainPid > 0 && psOk ? 1 : 0;
let activeSessions = 0;
let httpOk = 0;
let httpStatusCode = 0;
let httpLatencyMs = 0;
let httpError = "";
try {
activeSessions = await readActiveSessions();
const h = await probeGatewayHttp(gatewayProbeUrl());
httpOk = h.httpOk;
httpStatusCode = h.httpStatusCode;
httpLatencyMs = h.httpLatencyMs;
httpError = h.httpError;
} catch {
activeSessions = 0;
httpOk = 0;
httpStatusCode = 0;
httpLatencyMs = 0;
httpError = "probe_failed";
}
let childProcessCount = 0;
if (alive && mainPid > 0) {
try {
childProcessCount = await countDirectChildren(mainPid);
} catch {
childProcessCount = 0;
}
}
// --- decide health ---
const healthy = alive === 1 && httpOk === 1;
const storedMainPid = mainPid > 0 ? mainPid : 0;
// --- state machine: track consecutive failures ---
const consecutiveFailures = healthy ? 0 : prevState.consecutiveFailures + 1;
const lastRestartTs = prevState.lastRestartTs;
const cooldown = prevState.restartCooldownMs;
const cooldownElapsed = now - lastRestartTs >= cooldown;
const row = {
ts,
// --- trigger restart? ---
const shouldRestart =
consecutiveFailures >= FAILURE_THRESHOLD && cooldownElapsed;
const nextState: SenseState = {
consecutiveFailures,
lastRestartTs: shouldRestart ? now : lastRestartTs,
restartCooldownMs: cooldown,
};
const signal = {
ts: now,
alive,
mainPid: storedMainPid,
mainPid: mainPid > 0 ? mainPid : 0,
rssBytes: alive ? rssBytes : 0,
cpuPercent: alive ? cpuPercent : 0,
uptimeSec: alive ? uptimeSec : 0,
activeSessions,
childProcessCount: alive ? childProcessCount : 0,
httpOk,
httpStatusCode,
httpLatencyMs,
httpError,
consecutiveFailures,
};
await db.insert(hermesGatewayHealth).values(row);
const trigger = shouldRestart
? { command: "systemctl --user restart hermes-gateway" }
: null;
return {
ts: row.ts,
alive: row.alive,
mainPid: row.mainPid,
rssBytes: row.rssBytes,
cpuPercent: row.cpuPercent,
uptimeSec: row.uptimeSec,
activeSessions: row.activeSessions,
childProcessCount: row.childProcessCount,
};
return { state: nextState, signal, trigger };
}

View File

@ -1,121 +0,0 @@
import { createReadStream } from "node:fs";
import { readdir } from "node:fs/promises";
import { homedir } from "node:os";
import { join } from "node:path";
import { createInterface } from "node:readline";
import { hermesSessionMessageStats } from "./schema.ts";
const MEASUREMENT_WINDOW_MS = 900_000;
const MEASUREMENT_WINDOW_SECONDS = 900;
/**
 * Count user/assistant/tool messages in one JSONL session transcript whose
 * timestamps fall within [cutoffMs, nowMs]. Malformed lines and records
 * lacking string `role`/`timestamp` fields are skipped.
 *
 * @param {string} filePath path to a .jsonl session transcript
 * @param {number} cutoffMs inclusive lower bound (epoch ms)
 * @param {number} nowMs inclusive upper bound (epoch ms)
 * @returns {Promise<{ user: number; assistant: number; tool: number; fileHadActivity: boolean }>}
 */
async function aggregateJsonlFile(filePath, cutoffMs, nowMs) {
  let user = 0;
  let assistant = 0;
  let tool = 0;
  let fileHadActivity = false;
  const input = createReadStream(filePath, { encoding: "utf8" });
  const rl = createInterface({ input, crlfDelay: Infinity });
  try {
    for await (const line of rl) {
      const trimmed = line.trim();
      if (!trimmed) continue;
      let obj;
      try {
        obj = JSON.parse(trimmed);
      } catch {
        continue; // tolerate partially-written / corrupt lines
      }
      if (typeof obj.role !== "string" || typeof obj.timestamp !== "string") {
        continue;
      }
      const t = Date.parse(obj.timestamp);
      if (!Number.isFinite(t) || t < cutoffMs || t > nowMs) continue;
      const roleNorm = obj.role.trim().toLowerCase();
      if (roleNorm === "user") {
        user++;
        fileHadActivity = true;
      } else if (roleNorm === "assistant") {
        assistant++;
        fileHadActivity = true;
      } else if (roleNorm === "tool") {
        tool++;
        fileHadActivity = true;
      }
    }
  } finally {
    rl.close();
    // rl.close() does not tear down the underlying stream; destroy it so
    // the file descriptor is released promptly even on early exit/throw.
    input.destroy();
  }
  return { user, assistant, tool, fileHadActivity };
}
/**
 * Sense entry point: aggregate message counts from every hermes session
 * transcript under ~/.hermes/sessions/*.jsonl over the trailing
 * measurement window, persist one snapshot row, and return the same values.
 *
 * @param {*} db drizzle database handle used to insert the snapshot
 * @param {*} _peers unused peer-sense data
 */
export async function compute(db, _peers) {
  const nowMs = Date.now();
  // Only messages timestamped within [cutoffMs, nowMs] are counted.
  const cutoffMs = nowMs - MEASUREMENT_WINDOW_MS;
  const ts = nowMs;
  let totalUserMessages = 0;
  let totalAssistantMessages = 0;
  let totalToolMessages = 0;
  let activeSessions = 0;
  const sessionsDir = join(homedir(), ".hermes", "sessions");
  let files = [];
  try {
    const entries = await readdir(sessionsDir, { withFileTypes: true });
    files = entries
      .filter((e) => e.isFile() && e.name.endsWith(".jsonl"))
      .map((e) => join(sessionsDir, e.name));
  } catch (err) {
    // A missing sessions directory just means "no sessions yet";
    // any other readdir failure is unexpected and propagates.
    if (err && typeof err === "object" && "code" in err && err.code === "ENOENT") {
      files = [];
    } else {
      throw err;
    }
  }
  // Scan transcripts sequentially; a session counts as active when it
  // produced at least one in-window message.
  for (const filePath of files) {
    const { user, assistant, tool, fileHadActivity } = await aggregateJsonlFile(
      filePath,
      cutoffMs,
      nowMs,
    );
    totalUserMessages += user;
    totalAssistantMessages += assistant;
    totalToolMessages += tool;
    if (fileHadActivity) activeSessions++;
  }
  const totalMessages =
    totalUserMessages + totalAssistantMessages + totalToolMessages;
  const row = {
    ts,
    totalUserMessages,
    totalAssistantMessages,
    totalToolMessages,
    totalMessages,
    activeSessions,
    measurementWindowSeconds: MEASUREMENT_WINDOW_SECONDS,
  };
  // Persist the snapshot, then echo the same values as the sense signal.
  await db.insert(hermesSessionMessageStats).values(row);
  return {
    ts: row.ts,
    totalUserMessages: row.totalUserMessages,
    totalAssistantMessages: row.totalAssistantMessages,
    totalToolMessages: row.totalToolMessages,
    totalMessages: row.totalMessages,
    activeSessions: row.activeSessions,
    measurementWindowSeconds: row.measurementWindowSeconds,
  };
}

View File

@ -1,13 +0,0 @@
-- Migration: 0001_init
-- Creates the hermes_session_message_stats table for hermes-session-message-stats sense.
-- One row per compute() tick, aggregating message counts over the trailing window.
CREATE TABLE IF NOT EXISTS hermes_session_message_stats (
  id INTEGER PRIMARY KEY AUTOINCREMENT,         -- surrogate row id
  ts INTEGER NOT NULL,                          -- sample timestamp (epoch ms)
  total_user_messages INTEGER NOT NULL,         -- in-window user messages across all sessions
  total_assistant_messages INTEGER NOT NULL,    -- in-window assistant messages
  total_tool_messages INTEGER NOT NULL,         -- in-window tool messages
  total_messages INTEGER NOT NULL,              -- sum of the three counts above
  active_sessions INTEGER NOT NULL,             -- sessions with >=1 in-window message
  measurement_window_seconds INTEGER NOT NULL   -- window length (900 s in the sense code)
);

View File

@ -1,12 +0,0 @@
import { integer, sqliteTable } from "drizzle-orm/sqlite-core";

/** Drizzle mapping for hermes_session_message_stats: one aggregate row per tick. */
export const hermesSessionMessageStats = sqliteTable("hermes_session_message_stats", {
  id: integer("id").primaryKey({ autoIncrement: true }),
  ts: integer("ts").notNull(), // sample timestamp (epoch ms)
  totalUserMessages: integer("total_user_messages").notNull(), // in-window user messages
  totalAssistantMessages: integer("total_assistant_messages").notNull(), // in-window assistant messages
  totalToolMessages: integer("total_tool_messages").notNull(), // in-window tool messages
  totalMessages: integer("total_messages").notNull(), // sum of the three counts above
  activeSessions: integer("active_sessions").notNull(), // sessions with >=1 in-window message
  measurementWindowSeconds: integer("measurement_window_seconds").notNull(), // window length
});

View File

@ -1,87 +0,0 @@
import { loadavg, totalmem, freemem, uptime } from "node:os";
import { execSync } from "node:child_process";
import { readFile } from "node:fs/promises";
import { snapshots } from "./schema.ts";
const SOCKSTAT_PATH = "/proc/net/sockstat";
/**
 * Parse /proc/net/sockstat text into socket/TCP counters.
 * Fields that are missing or unparseable default to 0.
 */
function parseSockstat(content) {
  const counters = { socketsUsed: 0, tcpInuse: 0, tcpOrphan: 0, tcpTw: 0, tcpAlloc: 0, tcpMemPages: 0 };
  for (const rawLine of content.split("\n")) {
    const line = rawLine.trim();
    if (line.startsWith("sockets:")) {
      // "sockets: used N" — find the token after "used".
      const fields = line.split(/\s+/);
      const usedIdx = fields.indexOf("used");
      if (usedIdx !== -1 && usedIdx + 1 < fields.length) {
        counters.socketsUsed = Number.parseInt(fields[usedIdx + 1], 10) || 0;
      }
    } else if (line.startsWith("TCP:")) {
      // "TCP: inuse 5 orphan 0 tw 2 alloc 6 mem 1" — alternating key/value pairs.
      const fields = line.split(/\s+/);
      const kv = {};
      for (let i = 1; i + 1 < fields.length; i += 2) {
        kv[fields[i]] = Number.parseInt(fields[i + 1], 10) || 0;
      }
      counters.tcpInuse = kv.inuse ?? 0;
      counters.tcpOrphan = kv.orphan ?? 0;
      counters.tcpTw = kv.tw ?? 0;
      counters.tcpAlloc = kv.alloc ?? 0;
      counters.tcpMemPages = kv.mem ?? 0;
    }
  }
  return counters;
}
/**
 * Sense entry point: snapshot CPU load averages, memory, root-disk usage,
 * TCP socket stats (/proc/net/sockstat), and uptime; insert one row and
 * return the structured signal.
 *
 * @param {*} db drizzle database handle
 * @param {*} _peers unused peer-sense data
 */
export async function compute(db, _peers) {
  const [load1, load5, load15] = loadavg();
  const memTotal = totalmem();
  const memFree = freemem();
  const memUsed = memTotal - memFree;
  const memTotalMB = Math.round(memTotal / 1024 / 1024);
  const memUsedMB = Math.round(memUsed / 1024 / 1024);
  // Percentage rounded to two decimal places.
  const memUsedPct = Math.round((memUsed / memTotal) * 10000) / 100;
  // Root filesystem usage via df; stays 0 when df fails (best effort).
  let diskTotalGB = 0, diskUsedGB = 0, diskUsedPct = 0;
  try {
    const df = execSync("df -B1 / | tail -1", { encoding: "utf-8" }).trim();
    const parts = df.split(/\s+/);
    const total = Number(parts[1]);
    const used = Number(parts[2]);
    diskTotalGB = Math.round(total / 1024 / 1024 / 1024 * 100) / 100;
    diskUsedGB = Math.round(used / 1024 / 1024 / 1024 * 100) / 100;
    diskUsedPct = total > 0 ? Math.round((used / total) * 10000) / 100 : 0;
  } catch {}
  // TCP socket stats; zeros when /proc/net/sockstat is unreadable (best effort).
  let tcp = { socketsUsed: 0, tcpInuse: 0, tcpOrphan: 0, tcpTw: 0, tcpAlloc: 0, tcpMemPages: 0 };
  try {
    const content = await readFile(SOCKSTAT_PATH, "utf8");
    tcp = parseSockstat(content);
  } catch {}
  const ts = Date.now();
  const uptimeSec = Math.round(uptime());
  // Persist the snapshot row (ts is the primary key).
  await db.insert(snapshots).values({
    ts, cpuLoad1m: load1, cpuLoad5m: load5, cpuLoad15m: load15,
    memTotalMB, memUsedMB, memUsedPct,
    diskTotalGB, diskUsedGB, diskUsedPct,
    uptimeSec,
    socketsUsed: tcp.socketsUsed,
    tcpInuse: tcp.tcpInuse,
    tcpOrphan: tcp.tcpOrphan,
    tcpTw: tcp.tcpTw,
    tcpAlloc: tcp.tcpAlloc,
    tcpMemPages: tcp.tcpMemPages,
  });
  // Return the same data grouped for consumers of the sense signal.
  return {
    cpu: { load1m: load1, load5m: load5, load15m: load15 },
    memory: { totalMB: memTotalMB, usedMB: memUsedMB, usedPct: memUsedPct },
    disk: { totalGB: diskTotalGB, usedGB: diskUsedGB, usedPct: diskUsedPct },
    tcp: { socketsUsed: tcp.socketsUsed, inuse: tcp.tcpInuse, orphan: tcp.tcpOrphan, tw: tcp.tcpTw, alloc: tcp.tcpAlloc, memPages: tcp.tcpMemPages },
    uptimeSec,
  };
}

View File

@ -1,16 +0,0 @@
-- Migration: 0001_init
-- Creates the snapshots table for linux-system-health sense.
-- One system-health sample per row; the timestamp doubles as the primary key.
CREATE TABLE IF NOT EXISTS snapshots (
  ts INTEGER PRIMARY KEY,        -- sample timestamp (epoch ms)
  cpu_load_1m REAL NOT NULL,     -- 1-minute load average
  cpu_load_5m REAL NOT NULL,     -- 5-minute load average
  cpu_load_15m REAL NOT NULL,    -- 15-minute load average
  mem_total_mb INTEGER NOT NULL, -- total RAM (MiB)
  mem_used_mb INTEGER NOT NULL,  -- used RAM (MiB)
  mem_used_pct REAL NOT NULL,    -- used RAM percentage, two decimal places
  disk_total_gb REAL NOT NULL,   -- root filesystem size (from df)
  disk_used_gb REAL NOT NULL,    -- root filesystem used
  disk_used_pct REAL NOT NULL,   -- root filesystem used percentage
  uptime_sec INTEGER NOT NULL    -- host uptime at sample time (seconds)
);

View File

@ -1,6 +0,0 @@
-- Adds /proc/net/sockstat TCP socket counters to snapshots (merged from the
-- former linux-tcp-socket-stats sense). Columns are nullable so rows written
-- before this migration remain valid.
ALTER TABLE snapshots ADD COLUMN sockets_used INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_inuse INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_orphan INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_tw INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_alloc INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_mem_pages INTEGER;

View File

@ -1,22 +0,0 @@
// NOTE(review): `text` is imported but never used below — candidate for removal.
import { integer, real, sqliteTable, text } from "drizzle-orm/sqlite-core";

/** Drizzle mapping for snapshots: one system-health sample per row, keyed by timestamp. */
export const snapshots = sqliteTable("snapshots", {
  ts: integer("ts").primaryKey(), // sample timestamp (epoch ms)
  cpuLoad1m: real("cpu_load_1m").notNull(),
  cpuLoad5m: real("cpu_load_5m").notNull(),
  cpuLoad15m: real("cpu_load_15m").notNull(),
  memTotalMB: integer("mem_total_mb").notNull(),
  memUsedMB: integer("mem_used_mb").notNull(),
  memUsedPct: real("mem_used_pct").notNull(), // percentage, two decimal places
  diskTotalGB: real("disk_total_gb").notNull(), // root filesystem, from df
  diskUsedGB: real("disk_used_gb").notNull(),
  diskUsedPct: real("disk_used_pct").notNull(),
  uptimeSec: integer("uptime_sec").notNull(),
  // TCP socket stats (merged from linux-tcp-socket-stats). Nullable because
  // they were added by a later migration and older rows predate them.
  socketsUsed: integer("sockets_used"),
  tcpInuse: integer("tcp_inuse"),
  tcpOrphan: integer("tcp_orphan"),
  tcpTw: integer("tcp_tw"),
  tcpAlloc: integer("tcp_alloc"),
  tcpMemPages: integer("tcp_mem_pages"),
});

View File

@ -7,7 +7,13 @@
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"allowImportingTsExtensions": true,
"types": ["node"]
},
"include": ["./**/*.ts"]
"include": [
"senses/**/*.ts",
"workflows/**/*.ts",
"scripts/**/*.ts",
"workflows/_shared/**/*.ts"
]
}

View File

@ -0,0 +1,33 @@
import { join } from "node:path";
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createDevelopSenseWorkflow } from "@uncaged/nerve-workflow-meta";

/** Nerve installation root; the workflow runs with this as its cwd. */
const NERVE_ROOT = join(process.env.HOME ?? "/home/azureuser", ".uncaged-nerve");

// DashScope credentials back the LLM-extraction step and are mandatory.
const apiKey = process.env.DASHSCOPE_API_KEY;
const baseUrl = process.env.DASHSCOPE_BASE_URL;
const model = process.env.DASHSCOPE_MODEL ?? "qwen-plus";
if (!apiKey || !baseUrl) {
  throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL");
}

/** Cursor CLI invocations get five minutes before timing out. */
const CURSOR_TIMEOUT_MS = 300_000;

// Planner runs through cursor in ask mode; coder uses the default cursor adapter.
const plannerAdapter = createCursorAdapter({
  type: "cursor",
  mode: "ask",
  model: "auto",
  timeout: CURSOR_TIMEOUT_MS,
});

export default createDevelopSenseWorkflow({
  defaultAdapter: hermesAdapter,
  adapters: { planner: plannerAdapter, coder: cursorAdapter },
  extract: { provider: { apiKey, baseUrl, model } },
  cwd: NERVE_ROOT,
});

View File

@ -0,0 +1,34 @@
import { join } from "node:path";
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createDevelopWorkflowWorkflow } from "@uncaged/nerve-workflow-meta";

/** Nerve installation root, passed through as nerveRoot. */
const NERVE_ROOT = join(process.env.HOME ?? "/home/azureuser", ".uncaged-nerve");

// DashScope credentials back the LLM-extraction step and are mandatory.
const apiKey = process.env.DASHSCOPE_API_KEY;
const baseUrl = process.env.DASHSCOPE_BASE_URL;
const model = process.env.DASHSCOPE_MODEL ?? "qwen-plus";
if (!apiKey || !baseUrl) {
  throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL");
}

/** Cursor CLI invocations get five minutes before timing out. */
const CURSOR_TIMEOUT_MS = 300_000;

// Planner runs through cursor in ask mode; coder uses the default cursor adapter.
const plannerAdapter = createCursorAdapter({
  type: "cursor",
  mode: "ask",
  model: "auto",
  timeout: CURSOR_TIMEOUT_MS,
});

export default createDevelopWorkflowWorkflow({
  defaultAdapter: hermesAdapter,
  adapters: { planner: plannerAdapter, coder: cursorAdapter },
  extract: { provider: { apiKey, baseUrl, model } },
  nerveRoot: NERVE_ROOT,
});

View File

@ -0,0 +1,33 @@
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createLlmAdapter } from "@uncaged/nerve-workflow-utils";
import { moderator } from "./moderator.js";
import type { WorkflowMeta } from "./moderator.js";
import { createAnswererRole } from "./roles/answerer.js";
import { createExplorerRole } from "./roles/explorer.js";
import { createQuestionerRole } from "./roles/questioner.js";

/** Dependencies needed to assemble the extract-knowledge workflow. */
export type CreateKnowledgeExtractionDeps = {
  defaultAdapter: AgentFn;
  adapters?: Partial<Record<keyof WorkflowMeta, AgentFn>>;
  extract: LlmExtractorConfig;
};

/**
 * Build the extract-knowledge workflow definition.
 * Per-role adapter overrides win; otherwise questioner/answerer fall back
 * to a direct LLM adapter while explorer falls back to the workflow-wide
 * default adapter.
 */
export function createKnowledgeExtractionWorkflow(
  deps: CreateKnowledgeExtractionDeps,
): WorkflowDefinition<WorkflowMeta> {
  const { defaultAdapter, adapters, extract } = deps;
  const llmAdapter = createLlmAdapter(extract.provider);
  const pick = (role: keyof WorkflowMeta, fallback: AgentFn): AgentFn =>
    adapters?.[role] ?? fallback;
  return {
    name: "extract-knowledge",
    roles: {
      questioner: createQuestionerRole(pick("questioner", llmAdapter), { extract }),
      answerer: createAnswererRole(pick("answerer", llmAdapter), { extract }),
      explorer: createExplorerRole(pick("explorer", defaultAdapter), { extract }),
    },
    moderator,
  };
}

View File

@ -0,0 +1,30 @@
import { join } from "node:path";
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createKnowledgeExtractionWorkflow } from "./build.js";
import { resolveDashScopeProvider } from "../solve-issue/lib/provider.js";
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
const provider = await resolveDashScopeProvider(NERVE_ROOT);
if (provider === null) {
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or cfg get equivalents)");
}
const CURSOR_TIMEOUT_MS = 300_000;
const workflow = createKnowledgeExtractionWorkflow({
defaultAdapter: hermesAdapter,
adapters: {
explorer: createCursorAdapter({
type: "cursor",
model: "claude-sonnet-4",
timeout: CURSOR_TIMEOUT_MS,
}),
},
extract: { provider },
});
export default workflow;

View File

@ -0,0 +1,74 @@
import type { Dirent } from "node:fs";
import { readdir } from "node:fs/promises";
import { join } from "node:path";
import type { StartStep, WorkflowMessage } from "@uncaged/nerve-core";
import type { ExplorerMeta } from "../roles/explorer.js";
import type { QuestionerMeta } from "../roles/questioner.js";
/**
 * Recursively collect `.md` files under `rootDir`, returning paths relative to
 * the walk root with forward slashes. Missing/unreadable directories yield [].
 */
async function walkMarkdownFiles(rootDir: string, base: string): Promise<string[]> {
  const out: string[] = [];
  let entries: Dirent[];
  try {
    entries = (await readdir(rootDir, { withFileTypes: true })) as Dirent[];
  } catch {
    // A missing .knowledge directory simply means "no cards yet".
    return out;
  }
  for (const e of entries) {
    const name = e.name;
    const rel = base ? `${base}/${name}` : name;
    const full = join(rootDir, name);
    if (e.isDirectory()) {
      out.push(...(await walkMarkdownFiles(full, rel)));
    } else if (e.isFile() && name.endsWith(".md")) {
      // Normalize any platform separators so queue entries are repo-relative POSIX paths.
      out.push(rel.replace(/\\/g, "/"));
    }
  }
  return out;
}

/**
 * Enumerate all markdown files under `.knowledge/` as repo-relative paths;
 * seed line (first line of the trigger prompt) first if present.
 *
 * Fix: paths are now sorted in every branch. Previously only the no-seed
 * branch sorted, so queue order with a seed depended on platform readdir
 * order and was nondeterministic.
 */
export async function bootstrapKnowledgeQueue(cwd: string, startContent: string): Promise<string[]> {
  const knowledgeDir = join(cwd, ".knowledge");
  const relFiles = await walkMarkdownFiles(knowledgeDir, "");
  const paths = relFiles.map((f) => `.knowledge/${f}`).sort();
  const seed = startContent.trim().split(/\r?\n/u)[0]?.trim() ?? "";
  if (seed.length === 0) {
    return paths;
  }
  if (paths.length === 0) {
    return [seed];
  }
  // Seed goes first; drop a duplicate occurrence from the sorted remainder.
  return [seed, ...paths.filter((p) => p !== seed)];
}
/** Index of the most recent message emitted by `role`, or -1 when absent. */
function lastIndexOfRole(messages: WorkflowMessage[], role: string): number {
  let idx = messages.length;
  while (idx-- > 0) {
    if (messages[idx].role === role) {
      return idx;
    }
  }
  return -1;
}
/** Next queue for the questioner: bootstrap on the first turn, otherwise continue after answerer / explorer. */
export async function resolveQueueForQuestioner(
  start: StartStep,
  messages: WorkflowMessage[],
  cwd: string,
): Promise<string[]> {
  const qIndex = lastIndexOfRole(messages, "questioner");
  if (qIndex < 0) {
    // No questioner turn yet: derive the queue from disk plus the seed line.
    return bootstrapKnowledgeQueue(cwd, start.content);
  }
  const qMeta = messages[qIndex].meta as QuestionerMeta;
  const followUp = messages.slice(qIndex + 1).find((m) => m.role === "explorer");
  if (followUp === undefined) {
    // Explorer was skipped: carry the remaining queue forward unchanged.
    return qMeta.remaining_queue;
  }
  // Explorer ran: append any cards it created to the remaining queue.
  const eMeta = followUp.meta as ExplorerMeta;
  return [...qMeta.remaining_queue, ...eMeta.new_cards];
}

View File

@ -0,0 +1,21 @@
import type { StartStep } from "@uncaged/nerve-core";
type StartMetaWithWorkdir = StartStep["meta"] & { workdir?: string | null };

/**
 * Resolve the target repo working directory.
 * Priority: start.meta.workdir → absolute path on the prompt's second line → process.cwd().
 */
export function resolveWorkdir(start: StartStep): string {
  const meta = start.meta as StartMetaWithWorkdir;
  if (meta.workdir) {
    return meta.workdir;
  }
  // The trigger prompt may carry a workdir on its second line: "seed\n/abs/path".
  const secondLine = start.content.split(/\r?\n/)[1];
  if (secondLine !== undefined) {
    const candidate = secondLine.trim();
    if (candidate.startsWith("/")) {
      return candidate;
    }
  }
  return process.cwd();
}

View File

@ -0,0 +1,84 @@
import { END } from "@uncaged/nerve-core";
import type { Moderator, ThreadContext } from "@uncaged/nerve-core";
import type { AnswererMeta } from "./roles/answerer.js";
import type { ExplorerMeta } from "./roles/explorer.js";
import type { QuestionerMeta } from "./roles/questioner.js";
/** Per-role meta payload shapes recorded on each workflow step. */
export type WorkflowMeta = {
  questioner: QuestionerMeta;
  answerer: AnswererMeta;
  explorer: ExplorerMeta;
};

/** Step list as the moderator sees it. */
type Steps = ThreadContext<WorkflowMeta>["steps"];
/** Meta of the most recent questioner step, or undefined when none exists yet. */
function lastQuestionerRemaining(steps: Steps): QuestionerMeta | undefined {
  for (let idx = steps.length; idx-- > 0; ) {
    const step = steps[idx];
    if (step.role === "questioner") {
      return step.meta;
    }
  }
  return undefined;
}
/** End when the last two explorer invocations both added no new cards (issue #266 stagnation rule). */
function lastTwoExplorerRunsBothEmpty(steps: Steps): boolean {
  const recent = steps
    .filter((s) => s.role === "explorer")
    .slice(-2)
    .map((s) => s.meta as ExplorerMeta);
  if (recent.length < 2) {
    return false;
  }
  return recent.every((m) => m.new_cards.length === 0);
}
/** Remaining queue when the explorer turn was skipped (nothing was unanswered). */
function queueAfterSkippedExplorer(steps: Steps): string[] {
  return lastQuestionerRemaining(steps)?.remaining_queue ?? [];
}
/** Queue after an explorer step: questioner's remaining cards plus newly created ones. */
function queueAfterExplorerStep(steps: Steps): string[] {
  const tail = steps[steps.length - 1];
  if (tail === undefined || tail.role !== "explorer") {
    return [];
  }
  const questioner = lastQuestionerRemaining(steps);
  if (questioner === undefined) {
    return [];
  }
  const explorer = tail.meta as ExplorerMeta;
  return questioner.remaining_queue.concat(explorer.new_cards);
}
/**
 * Turn order: questioner → answerer → (explorer when something is unanswered)
 * → questioner… Ends when the queue is exhausted or two consecutive explorer
 * runs produced no new cards.
 */
export const moderator: Moderator<WorkflowMeta> = (context) => {
  const { steps } = context;
  const last = steps[steps.length - 1];
  if (last === undefined) {
    return "questioner";
  }
  switch (last.role) {
    case "questioner":
      return "answerer";
    case "answerer": {
      const am = last.meta as AnswererMeta;
      if (am.has_unanswered) {
        return "explorer";
      }
      // Explorer skipped: continue only if cards remain.
      return queueAfterSkippedExplorer(steps).length === 0 ? END : "questioner";
    }
    case "explorer": {
      if (lastTwoExplorerRunsBothEmpty(steps)) {
        return END;
      }
      return queueAfterExplorerStep(steps).length === 0 ? END : "questioner";
    }
    default:
      return END;
  }
};

View File

@ -0,0 +1,102 @@
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, nerveCommandEnv, spawnSafe } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveWorkdir } from "../lib/workdir.js";
import type { QuestionerMeta } from "./questioner.js";
/** Structured output contract for the answerer role. */
export const answererMetaSchema = z.object({
  // One entry per questioner question id.
  results: z.array(
    z.object({
      id: z.string(),
      found: z.boolean(),
      source: z.string(),
      note: z.string(),
    }),
  ),
  // True when at least one question is not covered by the knowledge base.
  has_unanswered: z.boolean(),
});

export type AnswererMeta = z.infer<typeof answererMetaSchema>;

/** Dependencies for building the answerer role. */
export type CreateAnswererRoleDeps = {
  extract: LlmExtractorConfig;
};
/** Meta of the most recent questioner message, or undefined when none exists. */
function lastQuestionerMeta(messages: WorkflowMessage[]): QuestionerMeta | undefined {
  for (let idx = messages.length - 1; idx >= 0; idx -= 1) {
    const msg = messages[idx];
    if (msg.role === "questioner") {
      return msg.meta as QuestionerMeta;
    }
  }
  return undefined;
}
/**
 * Build the answerer prompt: run `nerve knowledge query` for each question
 * from the last questioner turn and inline the retrieval excerpt (or a
 * failure diagnostic) per question id.
 * Throws when there are no questions — the wrapper in createAnswererRole is
 * expected to short-circuit before this is reached.
 */
export async function answererPrompt(ctx: ThreadContext): Promise<string> {
  const messages = ctx.steps as unknown as WorkflowMessage[];
  const cwd = resolveWorkdir(ctx.start);
  const qm = lastQuestionerMeta(messages);
  if (!qm || qm.questions.length === 0) {
    throw new Error("answerer: prompt invoked without questioner questions — wrapped role should short-circuit");
  }
  const blocks: string[] = [];
  for (const q of qm.questions) {
    // dryRun: record the skip instead of spawning the CLI.
    if ((ctx.start.meta as Record<string, unknown>).dryRun) {
      blocks.push(`### ${q.id}\n[dryRun] skipped nerve knowledge query\n`);
      continue;
    }
    const res = await spawnSafe(
      "nerve",
      ["knowledge", "query", q.question],
      {
        cwd,
        env: nerveCommandEnv(),
        timeoutMs: 120_000, // 2-minute cap per query
        dryRun: false,
        abortSignal: null,
      },
    );
    if (res.ok) {
      blocks.push(`### ${q.id} (${q.domain})\nQuestion: ${q.question}\n---\n${res.value.stdout}\n`);
    } else {
      // Map each spawn failure kind to a short diagnostic embedded in the prompt.
      const err = res.error;
      const detail =
        err.kind === "non_zero_exit"
          ? `exit ${err.exitCode}\n${err.stderr}`
          : err.kind === "timeout"
            ? `timeout\n${err.stderr}`
            : err.kind === "spawn_failed"
              ? err.message
              : "aborted";
      blocks.push(`### ${q.id}\nnerve knowledge query failed: ${detail}\n`);
    }
  }
  return [
    "You are the **answerer**. You MUST NOT read repository source code — only the CLI retrieval excerpts below.",
    "For each question id, decide whether the knowledge base already answers it.",
    "Set found=true only when the excerpt supports a confident answer; otherwise found=false.",
    "Set has_unanswered=true if any question remains unanswered by the knowledge base.",
    "",
    ...blocks,
  ].join("\n");
}
export function createAnswererRole(adapter: AgentFn, { extract }: CreateAnswererRoleDeps): Role<AnswererMeta> {
const inner = createRole(adapter, answererPrompt, answererMetaSchema, extract);
return async (ctx: ThreadContext) => {
const messages = ctx.steps as unknown as WorkflowMessage[];
const qm = lastQuestionerMeta(messages);
if (!qm || qm.questions.length === 0) {
return {
content: "answerer: no questions from questioner; skipping CLI lookup.",
meta: { results: [], has_unanswered: false },
};
}
return inner(ctx);
};
}

View File

@ -0,0 +1,93 @@
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveWorkdir } from "../lib/workdir.js";
import type { AnswererMeta } from "./answerer.js";
import type { QuestionerMeta } from "./questioner.js";
/** Structured output contract for the explorer role. */
export const explorerMetaSchema = z.object({
  // Existing cards the explorer edited; `section` is a short heading/path hint.
  patches: z.array(
    z.object({
      card: z.string(),
      section: z.string(),
    }),
  ),
  // Repo-relative paths of brand-new cards created under `.knowledge/`.
  new_cards: z.array(z.string()),
});

export type ExplorerMeta = z.infer<typeof explorerMetaSchema>;

/** Dependencies for building the explorer role. */
export type CreateExplorerRoleDeps = {
  extract: LlmExtractorConfig;
};
/** Meta of the most recent message with the given role, cast to M; undefined when absent. */
function lastMeta<M>(messages: WorkflowMessage[], role: string): M | undefined {
  for (let idx = messages.length; idx-- > 0; ) {
    const candidate = messages[idx];
    if (candidate.role === role) {
      return candidate.meta as M;
    }
  }
  return undefined;
}
/**
 * Build the explorer prompt: list question ids the answerer could not resolve
 * and instruct the agent to read the repo, write/patch `.knowledge` cards, and
 * run `nerve knowledge sync` until it passes.
 */
export function explorerPrompt(ctx: ThreadContext): string {
  const messages = ctx.steps as unknown as WorkflowMessage[];
  const threadId = ctx.start.meta.threadId;
  const qm = lastMeta<QuestionerMeta>(messages, "questioner");
  const am = lastMeta<AnswererMeta>(messages, "answerer");
  const cwd = resolveWorkdir(ctx.start);
  // Ids the answerer marked found=false; empty when no answerer turn exists.
  const unanswered =
    am?.results.filter((r) => !r.found).map((r) => r.id) ?? [];
  return `You are the **explorer** in an extract-knowledge workflow.
## Context
- Thread: \`nerve thread ${threadId}\`
- Working directory (repo root for paths): ${cwd}
- Current knowledge card (questioner): ${qm?.card ?? "(unknown)"}
## Unanswered question ids
${JSON.stringify(unanswered)}
Use the prior answerer results in the thread to map ids to full question text when you read messages above.
## Task
For each unanswered question, **read the codebase** as needed, then either:
- Add a new markdown file under \`.knowledge/\`, or
- Patch an existing card (prefer updating the card listed above when appropriate).
After any write or patch to \`.knowledge\`, run:
\`\`\`bash
nerve knowledge sync
\`\`\`
from this repo root (${cwd}), and fix failures until sync succeeds.
## Output meta
Report \`patches\` as { card, section } entries for cards you edited (section is a short heading or path hint).
Report \`new_cards\` as repo-relative paths for brand-new files you created (e.g. \`.knowledge/new-topic.md\`).
Do not claim work you did not perform.`;
}
/** Explorer role: LLM-backed, prompt rebuilt from thread state on every turn. */
export function createExplorerRole(
  adapter: AgentFn,
  { extract }: CreateExplorerRoleDeps,
): Role<ExplorerMeta> {
  const buildPrompt = async (ctx: ThreadContext): Promise<string> => explorerPrompt(ctx);
  return createRole(adapter, buildPrompt, explorerMetaSchema, extract);
}

View File

@ -0,0 +1,108 @@
import { readFile } from "node:fs/promises";
import { join } from "node:path";
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveQueueForQuestioner } from "../lib/knowledge-queue.js";
import { resolveWorkdir } from "../lib/workdir.js";
/** LLM extraction schema: exactly five questions, each with a stable id and domain label. */
const questionerExtractSchema = z.object({
  questions: z
    .array(
      z.object({
        id: z.string(),
        question: z.string(),
        domain: z.string(),
      }),
    )
    .length(5),
});

/** Meta recorded by the questioner step: the card processed and the queue left over. */
export type QuestionerMeta = {
  /** Empty when no .knowledge cards and no work to do. */
  card: string;
  questions: { id: string; question: string; domain: string }[];
  remaining_queue: string[];
};

/** Dependencies for building the questioner role. */
export type CreateQuestionerRoleDeps = {
  extract: LlmExtractorConfig;
};
/**
 * Static system prompt for the questioner.
 * NOTE(review): the last rule reads "…files or tools reason only from the
 * card…" — a separator between "tools" and "reason" appears to be missing;
 * confirm the intended wording before editing the prompt text.
 */
function questionerSystem(): string {
  return `You are the **questioner** in an extract-knowledge workflow.
Read the given markdown knowledge card. Propose exactly **five** technical questions that are **not** already answered or covered by that card.
Rules:
- Questions must be concrete and technical.
- Each question needs a stable string id (e.g. q1, q2, q3, q4, q5), a short domain label (e.g. routing, storage), and the question text.
- Do not assume access to other files or tools reason only from the card content shown.`;
}
/** Per-card user prompt: current card path, a hint of the remaining queue, and the card body. */
function questionerUser(card: string, cardBody: string, remainingHint: string[]): string {
  return `Current card path: ${card}
Remaining queue after this card (paths, may be empty): ${JSON.stringify(remainingHint)}
--- Card content ---
${cardBody}`;
}
/**
 * Build the questioner prompt from the head card of the resolved queue.
 * Throws on an empty queue or an unreadable card; the wrapper in
 * createQuestionerRole short-circuits the empty-queue case before the LLM.
 */
export async function questionerPrompt(ctx: ThreadContext): Promise<string> {
  const messages = ctx.steps as unknown as WorkflowMessage[];
  const cwd = resolveWorkdir(ctx.start);
  const queue = await resolveQueueForQuestioner(ctx.start, messages, cwd);
  const [card, ...remainingQueue] = queue;
  if (card === undefined) {
    throw new Error(
      "questioner: prompt invoked with empty queue — wrapped role should short-circuit before LLM",
    );
  }
  let cardBody: string;
  try {
    cardBody = await readFile(join(cwd, card), "utf8");
  } catch (e) {
    const reason = e instanceof Error ? e.message : String(e);
    throw new Error(`questioner: failed to read ${card}: ${reason}`);
  }
  return `${questionerSystem()}\n\n${questionerUser(card, cardBody, remainingQueue)}`;
}
/**
 * Wrap the LLM-backed questioner: short-circuit with empty meta when there is
 * no card to process, otherwise record the consumed card and remaining queue
 * alongside the extracted questions.
 */
export function createQuestionerRole(adapter: AgentFn, { extract }: CreateQuestionerRoleDeps): Role<QuestionerMeta> {
  const llmBacked = createRole(adapter, questionerPrompt, questionerExtractSchema, extract);
  return async (ctx: ThreadContext) => {
    const history = ctx.steps as unknown as WorkflowMessage[];
    const workdir = resolveWorkdir(ctx.start);
    const queue = await resolveQueueForQuestioner(ctx.start, history, workdir);
    const [card, ...remainingQueue] = queue;
    if (card === undefined) {
      return {
        content:
          "questioner: no `.knowledge` markdown files found and no seed path in the trigger prompt; queue is empty.",
        meta: {
          card: "",
          questions: [],
          remaining_queue: [],
        },
      };
    }
    const result = await llmBacked(ctx);
    return {
      content: result.content,
      meta: {
        card,
        questions: result.meta.questions,
        remaining_queue: remainingQueue,
      },
    };
  };
}

View File

@ -1,86 +0,0 @@
import type {
ModeratorContext,
RoleResult,
StartStep,
WorkflowDefinition,
WorkflowMessage,
} from "@uncaged/nerve-core";
import { END } from "@uncaged/nerve-core";
type WorkflowMeta = {
greeter: {
name: string;
error: string | null;
};
};
const DEFAULT_NAME = "friend";

/**
 * Derive a greeting name from raw trigger content.
 * A JSON object uses its `name` field; unparseable text is taken verbatim as
 * the name; everything else falls back to DEFAULT_NAME with an error code.
 */
function resolveNameFromContent(content: string): { name: string; error: string | null } {
  const trimmed = content.trim();
  if (trimmed === "") {
    return { name: DEFAULT_NAME, error: "empty_input" };
  }
  let parsed: unknown;
  try {
    parsed = JSON.parse(trimmed);
  } catch {
    // Not JSON at all: treat the raw text itself as the name.
    return { name: trimmed, error: null };
  }
  const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
  if (!isPlainObject) {
    return { name: DEFAULT_NAME, error: "invalid_json_shape" };
  }
  const nameField = (parsed as Record<string, unknown>).name;
  if (typeof nameField !== "string") {
    return { name: DEFAULT_NAME, error: "missing_name" };
  }
  const candidate = nameField.trim();
  return candidate === ""
    ? { name: DEFAULT_NAME, error: "name_empty" }
    : { name: candidate, error: null };
}
/**
 * Sole role of the hello-world workflow: greet by resolved name.
 * Never throws — unexpected failures are reported through meta.error so the
 * workflow still completes.
 */
async function greeter(
  start: StartStep,
  _messages: WorkflowMessage[],
): Promise<RoleResult<WorkflowMeta["greeter"]>> {
  try {
    const resolved = resolveNameFromContent(start.content);
    return {
      content: `Hello, ${resolved.name}!`,
      meta: { name: resolved.name, error: resolved.error },
    };
  } catch (unhandled) {
    const msg = unhandled instanceof Error ? unhandled.message : String(unhandled);
    return {
      content: `Hello, ${DEFAULT_NAME}!`,
      meta: { name: DEFAULT_NAME, error: `internal_error: ${msg}` },
    };
  }
}
/**
 * Single-step workflow: greeter runs once, then the thread ends.
 * The previous moderator had a redundant `if (last.role === "greeter")`
 * branch whose fallthrough returned END either way; collapsed to one return.
 */
const workflow: WorkflowDefinition<WorkflowMeta> = {
  name: "hello-world",
  roles: { greeter },
  moderator(context: ModeratorContext<WorkflowMeta>) {
    // First turn goes to the only role; any subsequent turn ends the thread.
    if (context.steps.length === 0) {
      return "greeter";
    }
    return END;
  },
};
export default workflow;

View File

@ -1,21 +0,0 @@
{
"name": "hello-world-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"dependencies": {
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest"
},
"devDependencies": {
"@types/node": "^22.0.0",
"typescript": "^5.7.0"
},
"pnpm": {
"overrides": {
"@uncaged/nerve-daemon": "link:../../../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../../../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../../../repos/nerve/packages/workflow-utils"
}
}
}

View File

@ -1,51 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
overrides:
'@uncaged/nerve-daemon': link:../../../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../../../repos/nerve/packages/workflow-utils
importers:
.:
dependencies:
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
typescript:
specifier: ^5.7.0
version: 5.9.3
packages:
'@types/node@22.19.17':
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
snapshots:
'@types/node@22.19.17':
dependencies:
undici-types: 6.21.0
typescript@5.9.3: {}
undici-types@6.21.0: {}

View File

@ -1,13 +0,0 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"types": ["node"]
},
"include": ["./**/*.ts"]
}

File diff suppressed because it is too large Load Diff

View File

@ -1,22 +0,0 @@
{
"name": "pr-code-reviewer-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"dependencies": {
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"typescript": "^5.7.0"
},
"pnpm": {
"overrides": {
"@uncaged/nerve-daemon": "link:../../../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../../../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../../../repos/nerve/packages/workflow-utils"
}
}
}

View File

@ -1,59 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
overrides:
'@uncaged/nerve-daemon': link:../../../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../../../repos/nerve/packages/workflow-utils
importers:
.:
dependencies:
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
typescript:
specifier: ^5.7.0
version: 5.9.3
packages:
'@types/node@22.19.17':
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
zod@4.3.6:
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
snapshots:
'@types/node@22.19.17':
dependencies:
undici-types: 6.21.0
typescript@5.9.3: {}
undici-types@6.21.0: {}
zod@4.3.6: {}

View File

@ -1,13 +0,0 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"types": ["node"]
},
"include": ["./**/*.ts"]
}

View File

@ -1,575 +0,0 @@
/**
 * PR summarizer: fetches a Gitea PR diff, analyzes it with an LLM, and emits
 * a Chinese Markdown summary.
 * Configured in nerve.yaml under workflows.pr-summarizer.
 * Manual trigger: nerve workflow trigger pr-summarizer --payload '{"prompt":"<PR URL or JSON>"}'
 * Sense-triggered form: `pr-summarizer|50|<prompt>` (see parseSenseWorkflowDirective).
 */
import type {
ModeratorContext,
RoleResult,
StartStep,
WorkflowDefinition,
WorkflowMessage,
} from "@uncaged/nerve-core";
import { END } from "@uncaged/nerve-core";
import {
isDryRun,
llmExtract,
nerveAgentContext,
readNerveYaml,
spawnSafe,
} from "@uncaged/nerve-workflow-utils";
import { join } from "node:path";
import { z } from "zod";
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
/** Max characters of the unified diff stored in meta (longer diffs are truncated, noted in content). */
const DIFF_TEXT_MAX_CHARS = 1_500_000;
/** Max diff prefix length forwarded to the analysis model. */
const DIFF_LLM_MAX_CHARS = 100_000;
/** Per-role meta recorded on pr-summarizer workflow steps. */
type PrSummarizerMeta = {
  // Parsed PR locator plus fetched title/state/diff; errorMessage is non-null on any failure.
  fetcher: {
    prUrl: string | null;
    owner: string | null;
    repo: string | null;
    prIndex: number | null;
    giteaBaseUrl: string | null;
    title: string | null;
    state: string | null;
    diffText: string | null;
    diffByteLength: number | null;
    httpStatus: number | null;
    errorMessage: string | null;
  };
  // LLM (or static fallback) analysis of the diff.
  analyzer: {
    analysisMarkdown: string | null;
    providerModel: string | null;
    errorMessage: string | null;
  };
  // Final Chinese Markdown deliverable.
  writer: {
    summaryZhMarkdown: string | null;
    errorMessage: string | null;
  };
};
/** Accepted JSON prompt shape: a prUrl and/or owner/repo/index/baseUrl parts. */
const jsonPromptSchema = z.object({
  prUrl: z.string().nullish(),
  owner: z.string().nullish(),
  repo: z.string().nullish(),
  index: z.number().int().positive().nullish(),
  baseUrl: z.string().nullish(),
});

/** Extraction schema for the analyzer's structured output. */
const analysisExtractSchema = z
  .object({
    analysisMarkdown: z.string().describe("Technical PR analysis in Markdown (can be English)."),
  })
  .describe("Structured PR analysis from the diff.");

/** Extraction schema for the writer's final Chinese summary. */
const summaryExtractSchema = z
  .object({
    summaryZhMarkdown: z
      .string()
      .describe(
        "Final deliverable: Chinese Markdown with title, key changes, risks, and test suggestions.",
      ),
  })
  .describe("Chinese Markdown PR summary.");
/** Read nerve.yaml for prompt context; degrade to a placeholder comment when unavailable. */
function getNerveYaml(): string {
  const result = readNerveYaml({ nerveRoot: NERVE_ROOT });
  if (result.ok) {
    return result.value;
  }
  return "# nerve.yaml unavailable";
}
/** Look up a key via the `cfg` CLI; null on spawn failure or empty output. */
async function cfgGet(key: string): Promise<string | null> {
  const spawned = await spawnSafe("cfg", ["get", key], {
    cwd: NERVE_ROOT,
    env: null,
    timeoutMs: 10_000,
  });
  if (!spawned.ok) {
    return null;
  }
  const value = spawned.value.stdout.trim();
  return value === "" ? null : value;
}
/**
 * Resolve DashScope credentials from env vars, falling back to `cfg get`.
 * Model defaults to "qwen-plus". Returns null when the API key or base URL is
 * missing. All three lookups run unconditionally, matching the original
 * side-effect order.
 */
async function resolveDashScopeProvider(): Promise<{
  baseUrl: string;
  apiKey: string;
  model: string;
} | null> {
  const apiKey = process.env.DASHSCOPE_API_KEY ?? (await cfgGet("DASHSCOPE_API_KEY"));
  const baseUrl = process.env.DASHSCOPE_BASE_URL ?? (await cfgGet("DASHSCOPE_BASE_URL"));
  const model =
    process.env.DASHSCOPE_MODEL ?? (await cfgGet("DASHSCOPE_MODEL")) ?? "qwen-plus";
  if (apiKey && baseUrl) {
    return { apiKey, baseUrl, model };
  }
  return null;
}
/**
 * Parse a Gitea PR URL of the form http(s)://host/owner/repo/pulls/NUMBER.
 * Returns the decomposed locator, or null for anything that does not match.
 */
function parseGiteaPullUrl(raw: string): {
  giteaBaseUrl: string;
  owner: string;
  repo: string;
  prIndex: number;
  prUrl: string;
} | null {
  const trimmedUrl = raw.trim();
  let parsed: URL;
  try {
    parsed = new URL(trimmedUrl);
  } catch {
    return null;
  }
  const isHttp = parsed.protocol === "http:" || parsed.protocol === "https:";
  if (!isHttp) {
    return null;
  }
  const segments = parsed.pathname.replace(/\/+$/, "").split("/").filter(Boolean);
  const pullsPos = segments.indexOf("pulls");
  // Need at least owner/repo before "pulls" and an index after it.
  if (pullsPos < 2 || pullsPos + 1 >= segments.length) {
    return null;
  }
  const indexSegment = segments[pullsPos + 1];
  if (!indexSegment || !/^\d+$/.test(indexSegment)) {
    return null;
  }
  const owner = segments[pullsPos - 2];
  const repo = segments[pullsPos - 1];
  if (!owner || !repo) {
    return null;
  }
  const prIndex = Number.parseInt(indexSegment, 10);
  if (!Number.isFinite(prIndex) || prIndex < 1) {
    return null;
  }
  return {
    giteaBaseUrl: `${parsed.protocol}//${parsed.host}`,
    owner,
    repo,
    prIndex,
    prUrl: trimmedUrl,
  };
}
/** Fully or partially resolved PR locator; parseError is non-null when resolution failed. */
type ResolvedPr = {
  prUrl: string | null;
  owner: string | null;
  repo: string | null;
  prIndex: number | null;
  giteaBaseUrl: string | null;
  parseError: string | null;
};

/**
 * Resolve a PR locator from the trigger prompt. Accepts either a JSON object
 * (validated against jsonPromptSchema, with a prUrl taking precedence for any
 * missing parts) or a bare Gitea PR URL.
 */
function resolvePrFromContent(content: string): ResolvedPr {
  const empty: ResolvedPr = {
    prUrl: null,
    owner: null,
    repo: null,
    prIndex: null,
    giteaBaseUrl: null,
    parseError: null,
  };
  const trimmed = content.trim();
  if (!trimmed) {
    return { ...empty, parseError: "Empty prompt" };
  }
  // JSON path: prompt starts with "{".
  if (trimmed.startsWith("{")) {
    let parsed: unknown;
    try {
      parsed = JSON.parse(trimmed) as unknown;
    } catch {
      return { ...empty, parseError: "Invalid JSON in prompt" };
    }
    const row = jsonPromptSchema.safeParse(parsed);
    if (!row.success) {
      return { ...empty, parseError: `JSON validation failed: ${row.error.message}` };
    }
    const j = row.data;
    let owner: string | null = j.owner ?? null;
    let repo: string | null = j.repo ?? null;
    let prIndex: number | null = j.index ?? null;
    let giteaBaseUrl: string | null = j.baseUrl ?? null;
    let prUrl: string | null = j.prUrl ?? null;
    // A prUrl, when present, backfills any part not given explicitly.
    if (j.prUrl) {
      const p = parseGiteaPullUrl(j.prUrl);
      if (p) {
        owner = owner ?? p.owner;
        repo = repo ?? p.repo;
        prIndex = prIndex ?? p.prIndex;
        giteaBaseUrl = giteaBaseUrl ?? p.giteaBaseUrl;
        prUrl = prUrl ?? p.prUrl;
      }
    }
    if (owner && repo && prIndex !== null && giteaBaseUrl) {
      const normalizedBase = giteaBaseUrl.replace(/\/+$/, "");
      const builtUrl = `${normalizedBase}/${owner}/${repo}/pulls/${prIndex}`;
      return {
        prUrl: prUrl ?? builtUrl,
        owner,
        repo,
        prIndex,
        giteaBaseUrl: normalizedBase,
        parseError: null,
      };
    }
    return {
      ...empty,
      parseError: "JSON prompt must include resolvable owner, repo, pr index, and baseUrl (or prUrl)",
    };
  }
  // Bare-URL path.
  const p = parseGiteaPullUrl(trimmed);
  if (!p) {
    return {
      ...empty,
      parseError: "Not a valid Gitea PR URL (expected https://host/owner/repo/pulls/NUMBER)",
    };
  }
  return {
    prUrl: p.prUrl,
    owner: p.owner,
    repo: p.repo,
    prIndex: p.prIndex,
    giteaBaseUrl: p.giteaBaseUrl.replace(/\/+$/, ""),
    parseError: null,
  };
}
/** Fresh all-null fetcher meta, used as the base for early-error returns. */
function emptyFetcherMeta(): PrSummarizerMeta["fetcher"] {
  const blank = {
    prUrl: null,
    owner: null,
    repo: null,
    prIndex: null,
    giteaBaseUrl: null,
    title: null,
    state: null,
    diffText: null,
    diffByteLength: null,
    httpStatus: null,
    errorMessage: null,
  };
  return blank;
}
const workflow: WorkflowDefinition<PrSummarizerMeta> = {
name: "pr-summarizer",
roles: {
/**
 * fetcher: parse the PR locator from the trigger prompt, then pull PR metadata
 * (title/state) and the unified diff from the Gitea REST API.
 * All failures are surfaced through meta.errorMessage rather than thrown.
 */
async fetcher(start: StartStep): Promise<RoleResult<PrSummarizerMeta["fetcher"]>> {
  const resolved = resolvePrFromContent(start.content);
  if (resolved.parseError !== null) {
    const meta: PrSummarizerMeta["fetcher"] = {
      ...emptyFetcherMeta(),
      errorMessage: resolved.parseError,
    };
    return { content: `Fetcher: parse error — ${resolved.parseError}`, meta };
  }
  // Auth is mandatory; bail out early while keeping the parsed locator in meta.
  const token = process.env.GITEA_TOKEN ?? null;
  if (!token || token.trim() === "") {
    const meta: PrSummarizerMeta["fetcher"] = {
      ...emptyFetcherMeta(),
      prUrl: resolved.prUrl,
      owner: resolved.owner,
      repo: resolved.repo,
      prIndex: resolved.prIndex,
      giteaBaseUrl: resolved.giteaBaseUrl,
      errorMessage: "GITEA_TOKEN is not set",
    };
    return { content: "Fetcher: missing GITEA_TOKEN (set env before running).", meta };
  }
  const apiRoot = `${resolved.giteaBaseUrl}/api/v1`;
  const pullJsonUrl = `${apiRoot}/repos/${resolved.owner}/${resolved.repo}/pulls/${resolved.prIndex}`;
  const pullDiffUrl = `${pullJsonUrl}.diff`;
  const headersJson: Record<string, string> = {
    Authorization: `token ${token}`,
    Accept: "application/json",
  };
  // Step 1: PR JSON for title/state.
  let title: string | null = null;
  let state: string | null = null;
  let httpStatus: number | null = null;
  let jsonError: string | null = null;
  try {
    const prRes = await fetch(pullJsonUrl, { headers: headersJson });
    httpStatus = prRes.status;
    const bodyText = await prRes.text();
    if (!prRes.ok) {
      jsonError = `GET PR JSON failed: HTTP ${prRes.status} ${bodyText.slice(0, 500)}`;
    } else {
      const data = JSON.parse(bodyText) as Record<string, unknown>;
      const t = data.title;
      const s = data.state;
      title = typeof t === "string" ? t : null;
      state = typeof s === "string" ? s : null;
    }
  } catch (e) {
    jsonError = e instanceof Error ? e.message : String(e);
  }
  // Step 2: unified diff, only attempted if step 1 succeeded.
  let diffText: string | null = null;
  let diffByteLength: number | null = null;
  let diffError: string | null = jsonError;
  let diffCharTruncated = false;
  if (jsonError === null) {
    try {
      const diffRes = await fetch(pullDiffUrl, {
        headers: {
          Authorization: `token ${token}`,
          Accept: "text/plain",
        },
      });
      // Note: this overwrites the JSON request's status with the diff request's.
      httpStatus = diffRes.status;
      const rawDiff = await diffRes.text();
      if (!diffRes.ok) {
        diffError = `GET PR diff failed: HTTP ${diffRes.status} ${rawDiff.slice(0, 500)}`;
      } else {
        diffByteLength = Buffer.byteLength(rawDiff, "utf8");
        // Cap what is stored in meta; truncation is flagged in the content line.
        if (rawDiff.length > DIFF_TEXT_MAX_CHARS) {
          diffText = rawDiff.slice(0, DIFF_TEXT_MAX_CHARS);
          diffCharTruncated = true;
          diffError = null;
        } else {
          diffText = rawDiff;
        }
      }
    } catch (e) {
      diffError = e instanceof Error ? e.message : String(e);
    }
  }
  const truncatedNote =
    diffCharTruncated && diffByteLength !== null
      ? ` (diff truncated in meta to ${DIFF_TEXT_MAX_CHARS} chars; full byte length ${diffByteLength})`
      : "";
  const meta: PrSummarizerMeta["fetcher"] = {
    prUrl: resolved.prUrl,
    owner: resolved.owner,
    repo: resolved.repo,
    prIndex: resolved.prIndex,
    giteaBaseUrl: resolved.giteaBaseUrl,
    title,
    state,
    diffText,
    diffByteLength,
    httpStatus,
    errorMessage: diffError,
  };
  // NOTE(review): in the success line below there is no separator between the
  // PR index and the title — possibly a character lost in transit; confirm
  // the intended formatting before changing it.
  const content =
    diffError !== null
      ? `Fetcher: ${resolved.owner}/${resolved.repo}#${resolved.prIndex} — failed. ${diffError}`
      : `Fetcher: ${resolved.owner}/${resolved.repo}#${resolved.prIndex}${title ?? "(no title)"} [${state ?? "?"}] diff bytes=${diffByteLength ?? 0} HTTP=${httpStatus ?? "?"}${truncatedNote}`;
  return { content, meta };
},
/**
 * analyzer: turn the fetched diff into structured Markdown findings.
 * Degrades gracefully: emits an explanatory stub on upstream failure, dryRun,
 * or missing LLM credentials, and only calls the LLM otherwise.
 */
async analyzer(
  start: StartStep,
  messages: WorkflowMessage[],
): Promise<RoleResult<PrSummarizerMeta["analyzer"]>> {
  const last = messages[messages.length - 1];
  const fm = last.meta as PrSummarizerMeta["fetcher"];
  // NOTE(review): `reason` appears twice in the content template — confirm
  // whether a connective between the occurrences was lost in transit.
  const skip = (reason: string): RoleResult<PrSummarizerMeta["analyzer"]> => ({
    content: `Analyzer skipped: ${reason}\n\n${reason}`,
    meta: {
      analysisMarkdown: `## 无法分析\n\n${reason}`,
      providerModel: null,
      errorMessage: reason,
    },
  });
  if (last.role !== "fetcher") {
    return skip("上一则消息不是 fetcher 输出");
  }
  if (fm.errorMessage !== null) {
    return skip(`拉取阶段失败: ${fm.errorMessage}`);
  }
  const diff = fm.diffText;
  if (diff === null || diff.length === 0) {
    return skip("diff 为空,无法分析");
  }
  if (isDryRun(start)) {
    return {
      content: "[dryRun] Analyzer skipped real LLM call.",
      meta: {
        analysisMarkdown: "## dryRun\n\n未调用模型。",
        providerModel: null,
        errorMessage: null,
      },
    };
  }
  const provider = await resolveDashScopeProvider();
  // No credentials: fall back to a static excerpt-based summary (not an error).
  if (provider === null) {
    const excerpt = diff.split("\n").slice(0, 80).join("\n");
    const analysisMarkdown =
      `## 静态摘要(无 LLM 凭据)\n\n` +
      `- 仓库: ${fm.owner}/${fm.repo} PR #${fm.prIndex}\n` +
      `- 标题: ${fm.title ?? "(null)"}\n` +
      `- diff 行数(近似): ${diff.split("\n").length}\n\n` +
      `### Diff 开头\n\n\`\`\`diff\n${excerpt}\n\`\`\`\n`;
    return {
      content: analysisMarkdown,
      meta: {
        analysisMarkdown,
        providerModel: null,
        errorMessage: null,
      },
    };
  }
  // Cap the diff forwarded to the model and note the truncation in the bundle.
  const diffForModel = diff.length > DIFF_LLM_MAX_CHARS ? diff.slice(0, DIFF_LLM_MAX_CHARS) : diff;
  const truncated = diff.length > DIFF_LLM_MAX_CHARS;
  const bundle =
    `Repository: ${fm.owner}/${fm.repo} PR index ${fm.prIndex}\n` +
    `Title: ${fm.title ?? ""}\n` +
    `State: ${fm.state ?? ""}\n` +
    (truncated ? `\n(diff truncated for model input to ${DIFF_LLM_MAX_CHARS} chars)\n` : "") +
    `\n--- unified diff ---\n${diffForModel}`;
  const extractPrompt =
    `${nerveAgentContext}\n\n` +
    `You are a senior reviewer. Analyze this Gitea pull request diff.\n` +
    `Output structured findings as Markdown: scope, files touched, behavior change, risks, test ideas.\n\n` +
    `Optional nerve.yaml context:\n\`\`\`yaml\n${getNerveYaml().slice(0, 4000)}\n\`\`\`\n\n` +
    `---\n${bundle}`;
  const extracted = await llmExtract({
    text: extractPrompt,
    schema: analysisExtractSchema,
    provider,
    dryRun: false,
  });
  if (!extracted.ok) {
    const errText = JSON.stringify(extracted.error);
    return {
      content: `Analyzer LLM error: ${errText}`,
      meta: {
        analysisMarkdown: null,
        providerModel: provider.model,
        errorMessage: errText,
      },
    };
  }
  const analysisMarkdown = extracted.value.analysisMarkdown;
  return {
    content: analysisMarkdown,
    meta: {
      analysisMarkdown,
      providerModel: provider.model,
      errorMessage: null,
    },
  };
},
/**
 * Writer role: turns the analyzer's technical findings into the final
 * Chinese-language Markdown summary. Falls back to stub output when dry-run
 * or when no LLM provider is configured; failures are reported through
 * meta.errorMessage rather than thrown.
 */
async writer(
  start: StartStep,
  messages: WorkflowMessage[],
): Promise<RoleResult<PrSummarizerMeta["writer"]>> {
  // The moderator guarantees the previous step is the analyzer; verified below anyway.
  const last = messages[messages.length - 1];
  const am = last.meta as PrSummarizerMeta["analyzer"];
  // Shared error shape: the message doubles as content and meta payload.
  const errOut = (msg: string): RoleResult<PrSummarizerMeta["writer"]> => ({
    content: `## 错误\n\n${msg}`,
    meta: {
      summaryZhMarkdown: `## 错误\n\n${msg}`,
      errorMessage: msg,
    },
  });
  if (last.role !== "analyzer") {
    return errOut("上一则消息不是 analyzer 输出,无法生成总结。");
  }
  // Propagate upstream failure instead of summarizing nothing.
  if (am.errorMessage !== null) {
    return errOut(`分析阶段失败,未生成臆造总结:${am.errorMessage}`);
  }
  const analysis = am.analysisMarkdown;
  if (analysis === null || analysis.trim() === "") {
    return errOut("分析正文为空,无法生成中文总结。");
  }
  // Dry-run: skip the model call entirely and return a stub.
  if (isDryRun(start)) {
    const stub = "## dryRun\n\n未调用模型生成中文总结。";
    return {
      content: stub,
      meta: { summaryZhMarkdown: stub, errorMessage: null },
    };
  }
  // No credentials: emit an excerpt of the upstream analysis instead of a summary.
  const provider = await resolveDashScopeProvider();
  if (provider === null) {
    const stub =
      `## 中文摘要(无 LLM)\n\n` +
      `以下为上游分析原文摘录,请配置 DASHSCOPE 相关凭据以生成压缩中文总结。\n\n${analysis.slice(0, 8000)}`;
    return {
      content: stub,
      meta: { summaryZhMarkdown: stub, errorMessage: null },
    };
  }
  const writerPrompt =
    `将下列 PR 技术分析改写为**中文 Markdown**交付物,包含:\n` +
    `- 标题(含仓库与 PR 编号)\n` +
    `- 变更要点(条列)\n` +
    `- 风险与注意事项\n` +
    `- 测试建议\n\n` +
    `---\n${analysis}`;
  // Structured extraction enforces the summary schema on the model output.
  const extracted = await llmExtract({
    text: writerPrompt,
    schema: summaryExtractSchema,
    provider,
    dryRun: false,
  });
  if (!extracted.ok) {
    const msg = JSON.stringify(extracted.error);
    return errOut(`Writer LLM 失败: ${msg}`);
  }
  const summaryZhMarkdown = extracted.value.summaryZhMarkdown;
  return {
    content: summaryZhMarkdown,
    meta: {
      summaryZhMarkdown,
      errorMessage: null,
    },
  };
},
},
/** Linear pipeline router: fetcher -> analyzer -> writer -> END. */
moderator(context: ModeratorContext<PrSummarizerMeta>) {
  const { steps } = context;
  if (steps.length === 0) {
    return "fetcher";
  }
  switch (steps[steps.length - 1].role) {
    case "fetcher":
      return "analyzer";
    case "analyzer":
      return "writer";
    default:
      // writer (or anything unexpected) terminates the workflow.
      return END;
  }
},
};
export default workflow;

View File

@ -1,21 +0,0 @@
{
"name": "pr-summarizer-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"dependencies": {
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0"
},
"pnpm": {
"overrides": {
"@uncaged/nerve-daemon": "link:../../../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../../../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../../../repos/nerve/packages/workflow-utils"
}
}
}

View File

@ -1,49 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
overrides:
'@uncaged/nerve-daemon': link:../../../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../../../repos/nerve/packages/workflow-utils
importers:
.:
dependencies:
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
packages:
'@types/node@22.19.17':
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
zod@4.3.6:
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
snapshots:
'@types/node@22.19.17':
dependencies:
undici-types: 6.21.0
undici-types@6.21.0: {}
zod@4.3.6: {}

View File

@ -1,416 +0,0 @@
import type {
RoleResult,
StartStep,
WorkflowDefinition,
WorkflowMessage,
} from "@uncaged/nerve-core";
import { END } from "@uncaged/nerve-core";
import type { SpawnError } from "@uncaged/nerve-workflow-utils";
import {
cursorAgent,
llmExtract,
nerveAgentContext,
readNerveYaml,
spawnSafe,
} from "@uncaged/nerve-workflow-utils";
import { existsSync, readFileSync } from "node:fs";
import { join } from "node:path";
import { z } from "zod";
// Workspace roots: Nerve state lives under ~/.uncaged-nerve; generated senses
// go in its senses/ subdirectory. The HOME fallback matches the deployment VM.
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
const SENSES_DIR = join(NERVE_ROOT, "senses");
/** Read nerve.yaml from the workspace; placeholder comment when unavailable. */
function getNerveYaml(): string {
  const loaded = readNerveYaml({ nerveRoot: NERVE_ROOT });
  if (!loaded.ok) {
    return "# nerve.yaml unavailable";
  }
  return loaded.value;
}
/** Look up a key via the `cfg` CLI; null when the command fails or prints nothing. */
async function cfgGet(key: string): Promise<string | null> {
  const run = await spawnSafe("cfg", ["get", key], {
    cwd: NERVE_ROOT,
    env: null,
    timeoutMs: 10_000,
  });
  if (!run.ok) {
    return null;
  }
  const value = run.value.stdout.trim();
  return value === "" ? null : value;
}
/**
 * Resolve DashScope credentials, preferring environment variables over the
 * cfg store; the model defaults to "qwen-plus". Returns null unless both the
 * API key and base URL are available.
 */
async function resolveDashScopeProvider(): Promise<{
  baseUrl: string;
  apiKey: string;
  model: string;
} | null> {
  const apiKey = process.env.DASHSCOPE_API_KEY ?? (await cfgGet("DASHSCOPE_API_KEY"));
  const baseUrl = process.env.DASHSCOPE_BASE_URL ?? (await cfgGet("DASHSCOPE_BASE_URL"));
  const model =
    process.env.DASHSCOPE_MODEL ?? (await cfgGet("DASHSCOPE_MODEL")) ?? "qwen-plus";
  if (apiKey && baseUrl) {
    return { apiKey, baseUrl, model };
  }
  return null;
}
/** One-line human-readable description of a SpawnError, with output excerpts capped. */
function formatSpawnFailure(error: SpawnError): string {
  switch (error.kind) {
    case "spawn_failed":
      return error.message;
    case "timeout":
      return `timeout (stdout=${error.stdout.slice(0, 200)})`;
    default:
      // Remaining variant: the process exited non-zero.
      return `exit ${error.exitCode} stderr=${error.stderr.slice(0, 400)}`;
  }
}
/**
 * Run the same checks the workflow used to ask Hermes to perform, but locally.
 * Hermes chat often returns UI prose instead of shell output, which caused false failures.
 *
 * Sequence: confirm the sense appears in `nerve status`, trigger it, then poll
 * `nerve sense query` up to 25 times (~1s apart) until rows appear.
 * Returns the verdict, a combined log of every command, and the decisive reason.
 */
async function runSenseSmokeTest(senseName: string): Promise<{ ok: boolean; log: string; reason: string }> {
  const logParts: string[] = [];
  // Thin wrapper: run `nerve <args>` in the workspace, normalizing failures to a message.
  const runNerve = async (args: string[]): Promise<{ ok: true; out: string } | { ok: false; err: string }> => {
    const result = await spawnSafe("nerve", args, {
      cwd: NERVE_ROOT,
      env: null,
      timeoutMs: 300_000,
    });
    if (!result.ok) {
      return { ok: false, err: formatSpawnFailure(result.error) };
    }
    return { ok: true, out: result.value.stdout };
  };
  const statusRun = await runNerve(["status"]);
  if (!statusRun.ok) {
    return {
      ok: false,
      log: `=== nerve status ===\nERROR: ${statusRun.err}`,
      reason: `Smoke test command failed: ${statusRun.err}`,
    };
  }
  const status = statusRun.out;
  logParts.push("=== nerve status ===\n" + status);
  if (!status.includes(senseName)) {
    return {
      ok: false,
      log: logParts.join("\n\n"),
      reason: `Sense "${senseName}" not listed in \`nerve status\` output`,
    };
  }
  const triggerRun = await runNerve(["sense", "trigger", senseName]);
  if (!triggerRun.ok) {
    logParts.push(`=== nerve sense trigger ===\nERROR: ${triggerRun.err}`);
    return {
      ok: false,
      log: logParts.join("\n\n"),
      reason: `Smoke test command failed: ${triggerRun.err}`,
    };
  }
  logParts.push("=== nerve sense trigger ===\n" + triggerRun.out);
  let lastQuery = "";
  for (let i = 0; i < 25; i++) {
    // Fixed: previously each poll shelled out to `sleep 1` via spawnSafe, paying a
    // subprocess per attempt and needing its own error handling. An in-process
    // timer is equivalent, cheaper, and cannot fail.
    await new Promise<void>((resolve) => setTimeout(resolve, 1_000));
    const queryRun = await runNerve(["sense", "query", senseName]);
    if (!queryRun.ok) {
      logParts.push(`=== nerve sense query (attempt ${i + 1}) ===\nERROR: ${queryRun.err}`);
    } else {
      lastQuery = queryRun.out;
      logParts.push(`=== nerve sense query (attempt ${i + 1}) ===\n${lastQuery}`);
      // Any row in the sense table means compute + persist both worked.
      if (!lastQuery.includes("(0 rows)")) {
        return {
          ok: true,
          log: logParts.join("\n\n"),
          reason: "Trigger succeeded and query returned at least one row",
        };
      }
    }
  }
  return {
    ok: false,
    log: logParts.join("\n\n"),
    reason: lastQuery.includes("(0 rows)")
      ? "Query still returned 0 rows after trigger (compute error, throttle drop, or DB not written)"
      : "Timed out waiting for successful sense query",
  };
}
// Build context string with existing sense examples
function buildSenseExamples(): string {
  const sections: string[] = [];
  // Read a file if it exists, else contribute an empty code block.
  const readIfPresent = (path: string): string =>
    existsSync(path) ? readFileSync(path, "utf-8") : "";
  for (const name of ["cpu-usage", "linux-system-health"]) {
    const dir = join(SENSES_DIR, name);
    if (!existsSync(dir)) {
      continue;
    }
    const indexFile = readIfPresent(join(dir, "index.js"));
    const schema = readIfPresent(join(dir, "schema.ts"));
    const migration = readIfPresent(join(dir, "migrations", "0001_init.sql"));
    sections.push(
      `### Example sense: ${name}\n\n` +
        `**index.js:**\n\`\`\`js\n${indexFile}\n\`\`\`\n\n` +
        `**schema.ts:**\n\`\`\`ts\n${schema}\n\`\`\`\n\n` +
        `**migrations/0001_init.sql:**\n\`\`\`sql\n${migration}\n\`\`\``,
    );
  }
  return sections.join("\n\n---\n\n");
}
// Per-role meta payloads attached to workflow messages.
type SenseMeta = {
  // planner: full markdown plan, chosen kebab-case sense name, raw user request
  planner: { plan: string; senseName: string; userInput: string };
  // coder: which expected files exist on disk, plus the raw cursor-agent output
  coder: { senseName: string; files: Record<string, boolean>; cursorOutput: string };
  // tester: smoke-test verdict plus the attempt number (drives the retry budget)
  tester: { passed: boolean; senseName: string; reason: string; attempt: number };
};
// Extraction schema applied to the planner's markdown plan.
const senseMetaSchema = z
  .object({
    name: z.string().describe("kebab-case sense name, e.g. 'disk-usage'"),
    description: z.string().describe("One-line description of what this sense monitors"),
  })
  .describe("Extract the sense name and a one-line description from the plan");
const workflow: WorkflowDefinition<SenseMeta> = {
name: "sense-generator",
roles: {
/**
 * Planner role: asks cursor-agent (read-only "ask" mode) to design the new
 * sense as a markdown plan, then extracts the kebab-case sense name from that
 * plan with a structured LLM call. Failures leave senseName empty so the
 * moderator/coder can see the plan did not fully materialize.
 */
async planner(
  start: StartStep,
  _messages: WorkflowMessage[],
): Promise<RoleResult<SenseMeta["planner"]>> {
  const userInput = start.content;
  // The name-extraction step below needs an LLM provider; bail early without one.
  const provider = await resolveDashScopeProvider();
  if (provider === null) {
    return {
      content:
        "Cannot run planner: set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or configure via `cfg get`), " +
        "and optionally DASHSCOPE_MODEL.",
      meta: { plan: "", senseName: "", userInput },
    };
  }
  const planPrompt = `You are planning a new Nerve sense.
${nerveAgentContext}
User request: ${userInput}
Pick a good kebab-case name for this sense.
Your job is to produce a PLAN (not code) for this sense. Output a structured plan in markdown with these sections:
## Sense Design
### Name
(decide a kebab-case name)
### Fields
List every field the sense should collect, with name, type (integer/real/text), and description.
### Compute Logic
Describe step-by-step what the compute() function should do. Be specific about which Node.js APIs or shell commands to use.
### Trigger Config
- group: (suggest a group name)
- interval: (decide based on the use case, e.g. 30s, 1m, 5m)
- throttle: (suggest)
- timeout: (suggest)
Here are existing senses for reference on the format and patterns used:
${buildSenseExamples()}
Current nerve.yaml:
\`\`\`yaml
${getNerveYaml()}
\`\`\`
Output ONLY the plan in markdown. Be precise and implementation-ready.`;
  // "ask" mode: the agent only reads and answers; it must not modify files.
  const planResult = await cursorAgent({
    prompt: planPrompt,
    mode: "ask",
    cwd: NERVE_ROOT,
    env: null,
    timeoutMs: null,
  });
  if (!planResult.ok) {
    return {
      content: `cursor-agent failed: ${formatSpawnFailure(planResult.error)}`,
      meta: { plan: "", senseName: "", userInput },
    };
  }
  const plan = planResult.value;
  // Pull the chosen sense name out of the free-form plan.
  const extracted = await llmExtract({
    text: plan,
    schema: senseMetaSchema,
    provider,
  });
  if (!extracted.ok) {
    // Keep the plan, but surface the extraction error; senseName stays empty.
    return {
      content: `${plan}\n\n[llmExtract error] ${JSON.stringify(extracted.error)}`,
      meta: { plan, senseName: "", userInput },
    };
  }
  return {
    content: plan,
    meta: { plan, senseName: extracted.value.name, userInput },
  };
},
/**
 * Coder role: hands the planner's plan to cursor-agent (write mode) to create
 * the sense's three files and update nerve.yaml, then verifies on disk which
 * of the expected files actually exist. The file map feeds the tester's
 * missing-files check.
 */
async coder(
  _start: StartStep,
  messages: WorkflowMessage[],
): Promise<RoleResult<SenseMeta["coder"]>> {
  // Previous message is either the planner's plan or a failed tester round.
  const last = messages[messages.length - 1];
  const { plan, senseName } = last.meta as { plan: string; senseName: string };
  const codePrompt = `You are implementing a new Nerve sense called "${senseName}" in the directory ${SENSES_DIR}/${senseName}/.
Here is the plan:
${plan}
You need to create exactly 3 files:
1. \`${SENSES_DIR}/${senseName}/index.js\` — the compute() function
2. \`${SENSES_DIR}/${senseName}/schema.ts\` — Drizzle ORM schema
3. \`${SENSES_DIR}/${senseName}/migrations/0001_init.sql\` — SQLite migration
And UPDATE the existing file:
4. \`${NERVE_ROOT}/nerve.yaml\` — add the new sense config and reflex entry
Here are existing senses for reference follow the EXACT same patterns:
${buildSenseExamples()}
Current nerve.yaml (append to it, don't overwrite existing entries):
\`\`\`yaml
${getNerveYaml()}
\`\`\`
IMPORTANT RULES:
- index.js uses \`export async function compute(db, _peers)\` signature
- index.js imports the schema table from "./schema.ts" and uses \`await db.insert(table).values({...})\` to persist
- schema.ts uses drizzle-orm/sqlite-core imports
- migration SQL must match schema.ts exactly
- nerve.yaml: add under \`senses:\` and add a reflex under \`reflexes:\`
- Use the interval specified in the plan for the reflex
Create all files now.`;
  // Default (write) mode: the agent creates/edits files in the workspace.
  const agentResult = await cursorAgent({
    prompt: codePrompt,
    mode: "default",
    cwd: NERVE_ROOT,
    env: null,
    timeoutMs: null,
  });
  if (!agentResult.ok) {
    const resultText = `cursor-agent failed: ${formatSpawnFailure(agentResult.error)}`;
    return {
      content: resultText,
      meta: {
        senseName,
        files: { index: false, schema: false, migration: false },
        cursorOutput: resultText,
      },
    };
  }
  const result = agentResult.value;
  // Trust the filesystem, not the agent's claims, about what was created.
  const senseDir = join(SENSES_DIR, senseName);
  const files = {
    index: existsSync(join(senseDir, "index.js")),
    schema: existsSync(join(senseDir, "schema.ts")),
    migration: existsSync(join(senseDir, "migrations", "0001_init.sql")),
  };
  return {
    content: result,
    meta: { senseName, files, cursorOutput: result },
  };
},
async tester(
_start: StartStep,
messages: WorkflowMessage[],
): Promise<RoleResult<SenseMeta["tester"]>> {
const last = messages[messages.length - 1];
const { senseName, files } = last.meta as { senseName: string; files: Record<string, boolean> };
const attempt = messages.filter((m) => m.role === "tester").length + 1;
const missing = Object.entries(files).filter(([, v]) => !v).map(([k]) => k);
if (missing.length > 0) {
return {
content: `FAIL — missing files: ${missing.join(", ")}`,
meta: { passed: false, senseName, reason: `Missing files: ${missing.join(", ")}`, attempt },
};
}
const smoke = await runSenseSmokeTest(senseName);
if (smoke.ok) {
return {
content: `PASS — ${smoke.reason}`,
meta: { passed: true, senseName, reason: smoke.reason, attempt },
};
}
return {
content: `FAIL — ${smoke.reason}`,
meta: {
passed: false,
senseName,
reason: `${smoke.reason}\n\n--- smoke log ---\n${smoke.log}`,
attempt,
},
};
},
},
/** Pipeline: planner -> coder -> tester, retrying the coder up to 3 attempts on failure. */
moderator(context) {
  const { steps } = context;
  if (steps.length === 0) {
    return "planner";
  }
  const latest = steps[steps.length - 1];
  switch (latest.role) {
    case "planner":
      return "coder";
    case "coder":
      return "tester";
    case "tester":
      // Loop back to the coder until the smoke test passes or attempts run out.
      return !latest.meta.passed && latest.meta.attempt < 3 ? "coder" : END;
    default:
      return END;
  }
},
};
export default workflow;

View File

@ -1,22 +0,0 @@
{
"name": "sense-generator-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"dependencies": {
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"typescript": "^5.7.0"
},
"pnpm": {
"overrides": {
"@uncaged/nerve-daemon": "link:../../../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../../../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../../../repos/nerve/packages/workflow-utils"
}
}
}

View File

@ -1,59 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
overrides:
'@uncaged/nerve-daemon': link:../../../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../../../repos/nerve/packages/workflow-utils
importers:
.:
dependencies:
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
typescript:
specifier: ^5.7.0
version: 5.9.3
packages:
'@types/node@22.19.17':
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
zod@4.3.6:
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
snapshots:
'@types/node@22.19.17':
dependencies:
undici-types: 6.21.0
typescript@5.9.3: {}
undici-types@6.21.0: {}
zod@4.3.6: {}

View File

@ -1,13 +0,0 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"types": ["node"]
},
"include": ["./**/*.ts"]
}

View File

@ -0,0 +1,43 @@
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { moderator } from "./moderator.js";
import type { WorkflowMeta } from "./moderator.js";
import { createCommitterRole } from "./roles/committer.js";
import { createImplementRole } from "./roles/implement.js";
import { createPlanRole } from "./roles/plan.js";
import { createPrepareRole } from "./roles/prepare.js";
import { createPublishRole } from "./roles/publish.js";
import { createReadIssueRole } from "./roles/read-issue.js";
import { createReviewRole } from "./roles/review.js";
import { createTestRole } from "./roles/test.js";
/** Dependencies injected into the solve-issue workflow factory. */
export type CreateSolveIssueDeps = {
  // Agent used for every role without a specific override.
  defaultAdapter: AgentFn;
  // Optional per-role adapter overrides (e.g. cursor for plan/implement).
  adapters?: Partial<Record<keyof WorkflowMeta, AgentFn>>;
  // Absolute path to the Nerve workspace root.
  nerveRoot: string;
  // LLM extractor config used to pull structured meta from agent replies.
  extract: LlmExtractorConfig;
};
export function createSolveIssueWorkflow({
defaultAdapter,
adapters,
nerveRoot,
extract,
}: CreateSolveIssueDeps): WorkflowDefinition<WorkflowMeta> {
const a = (role: keyof WorkflowMeta) => adapters?.[role] ?? defaultAdapter;
return {
name: "solve-issue",
roles: {
"read-issue": createReadIssueRole(a("read-issue"), extract),
prepare: createPrepareRole(a("prepare"), extract),
plan: createPlanRole(a("plan"), { extract, nerveRoot }),
implement: createImplementRole(a("implement"), { extract, nerveRoot }),
committer: createCommitterRole(a("committer"), extract),
review: createReviewRole(a("review"), extract, nerveRoot),
test: createTestRole(a("test"), extract),
publish: createPublishRole(a("publish"), { extract, nerveRoot }),
},
moderator,
};
}

View File

@ -0,0 +1,37 @@
import { join } from "node:path";
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createSolveIssueWorkflow } from "./build.js";
import { resolveDashScopeProvider } from "./lib/provider.js";
// Workspace root for the Nerve installation this workflow runs against.
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
// Fail fast at module load: structured extraction needs DashScope credentials.
const provider = await resolveDashScopeProvider(NERVE_ROOT);
if (provider === null) {
  throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or cfg get equivalents)");
}
// Hard cap for each cursor-agent run (5 minutes).
const CURSOR_TIMEOUT_MS = 300_000;
// Hermes handles most roles; plan (read-only "ask" mode) and implement run on Cursor.
const workflow = createSolveIssueWorkflow({
  defaultAdapter: hermesAdapter,
  adapters: {
    plan: createCursorAdapter({
      type: "cursor",
      mode: "ask",
      model: "auto",
      timeout: CURSOR_TIMEOUT_MS,
    }),
    implement: createCursorAdapter({
      type: "cursor",
      model: "auto",
      timeout: CURSOR_TIMEOUT_MS,
    }),
  },
  nerveRoot: NERVE_ROOT,
  extract: { provider },
});
export default workflow;

View File

@ -0,0 +1,26 @@
import type { LlmProvider } from "@uncaged/nerve-workflow-utils";
import { spawnSafe } from "@uncaged/nerve-workflow-utils";
/** Query the `cfg` CLI for a key; returns null on failure or empty output. */
export async function cfgGet(nerveRoot: string, key: string): Promise<string | null> {
  const run = await spawnSafe("cfg", ["get", key], {
    cwd: nerveRoot,
    env: null,
    timeoutMs: 10_000,
    abortSignal: null,
  });
  if (!run.ok) {
    return null;
  }
  const trimmed = run.value.stdout.trim();
  return trimmed === "" ? null : trimmed;
}
/**
 * Resolve DashScope LLM credentials, preferring environment variables over
 * the cfg store; the model defaults to "qwen-plus". Returns null unless both
 * the API key and base URL are available.
 */
export async function resolveDashScopeProvider(nerveRoot: string): Promise<LlmProvider | null> {
  const apiKey = process.env.DASHSCOPE_API_KEY ?? (await cfgGet(nerveRoot, "DASHSCOPE_API_KEY"));
  const baseUrl = process.env.DASHSCOPE_BASE_URL ?? (await cfgGet(nerveRoot, "DASHSCOPE_BASE_URL"));
  const model = process.env.DASHSCOPE_MODEL ?? (await cfgGet(nerveRoot, "DASHSCOPE_MODEL")) ?? "qwen-plus";
  if (apiKey && baseUrl) {
    return { apiKey, baseUrl, model };
  }
  return null;
}

View File

@ -0,0 +1,86 @@
import { join } from "node:path";
import type { RoleStep, WorkflowMessage } from "@uncaged/nerve-core";
// Parsed ---SOLVE_ISSUE_PARSE--- marker: which issue on which Gitea host/repo.
type SolveIssueParse = {
  host: string;
  owner: string;
  repo: string;
  number: number;
};
// Parsed ---SOLVE_ISSUE_REPO--- marker: where the prepare step checked out the repo.
type SolveIssueRepo = {
  path: string;
  defaultBranch: string;
  packageManager: string;
};
// Fallback matches the Azure VM this workspace is deployed on.
const HOME = process.env.HOME ?? "/home/azureuser";
/**
 * Find a `---MARKER---` section in agent output and parse its `key: value`
 * lines into a record. Returns null when the marker is absent or yields no pairs.
 */
function extractMarkedSection(text: string, marker: string): Record<string, string> | null {
  // Escape regex metacharacters so arbitrary marker names match literally.
  const escaped = marker.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  const sectionRe = new RegExp(`---${escaped}---\\s*([\\s\\S]*?)(?:\\n---|$)`);
  const match = sectionRe.exec(text);
  if (match === null) {
    return null;
  }
  // Hoisted outside the loop: same pattern applied to every line.
  const lineRe = /^([a-zA-Z]+):\s*(.+)$/;
  const entries: Record<string, string> = {};
  for (const line of match[1].split("\n")) {
    const kv = lineRe.exec(line);
    if (kv !== null) {
      entries[kv[1]] = kv[2].trim();
    }
  }
  return Object.keys(entries).length > 0 ? entries : null;
}
/**
 * Parse the issue-coordinates marker. All four fields must be present and the
 * issue number must be a positive finite number; otherwise returns null.
 */
function parseSolveIssueParse(text: string): SolveIssueParse | null {
  const rec = extractMarkedSection(text, "SOLVE_ISSUE_PARSE");
  if (rec === null) {
    return null;
  }
  const host = rec.host ?? "";
  const owner = rec.owner ?? "";
  const repo = rec.repo ?? "";
  const num = Number(rec.number ?? "");
  const valid = host !== "" && owner !== "" && repo !== "" && Number.isFinite(num) && num > 0;
  return valid ? { host, owner, repo, number: num } : null;
}
/**
 * Parse the repo-checkout marker. Only `path` is mandatory; defaultBranch and
 * packageManager fall back to "main" and "pnpm".
 */
function parseSolveIssueRepo(text: string): SolveIssueRepo | null {
  const rec = extractMarkedSection(text, "SOLVE_ISSUE_REPO");
  if (rec === null) {
    return null;
  }
  const path = rec.path ?? "";
  if (path === "") {
    return null;
  }
  return {
    path,
    defaultBranch: rec.defaultBranch ?? "main",
    packageManager: rec.packageManager ?? "pnpm",
  };
}
/** Prefer explicit prepare marker; else ~/Code/<owner>/<repo> from read-issue parse block. */
export function resolveRepoCwd(messages: WorkflowMessage[]): string | null {
  // Walk newest-to-oldest so the most recent marker of each kind wins.
  const newestFirst = [...messages].reverse();
  for (const message of newestFirst) {
    if (message.role !== "prepare") {
      continue;
    }
    const repo = parseSolveIssueRepo(message.content);
    if (repo !== null) {
      return repo.path;
    }
  }
  for (const message of newestFirst) {
    if (message.role !== "read-issue") {
      continue;
    }
    const parsed = parseSolveIssueParse(message.content);
    if (parsed !== null) {
      return join(HOME, "Code", parsed.owner, parsed.repo);
    }
  }
  return null;
}

View File

@ -0,0 +1,99 @@
import { END } from "@uncaged/nerve-core";
import type { Moderator } from "@uncaged/nerve-core";
import type { ReadIssueMeta } from "./roles/read-issue.js";
import type { PrepareMeta } from "./roles/prepare.js";
import type { PlanMeta } from "./roles/plan.js";
import type { ImplementMeta } from "./roles/implement.js";
import type { CommitterMeta } from "./roles/committer.js";
import type { ReviewMeta } from "./roles/review.js";
import type { TestMeta } from "./roles/test.js";
import type { PublishMeta } from "./roles/publish.js";
// Meta payload for each role in the solve-issue pipeline, keyed by role name.
export type WorkflowMeta = {
  "read-issue": ReadIssueMeta;
  prepare: PrepareMeta;
  plan: PlanMeta;
  implement: ImplementMeta;
  committer: CommitterMeta;
  review: ReviewMeta;
  test: TestMeta;
  publish: PublishMeta;
};
// Hard ceilings: at most 20 implement rounds, and at most 10 gate rejections
// (review/test/committer/publish) in total, before the workflow gives up.
const MAX_IMPLEMENT_ROUNDS = 20;
const MAX_TOTAL_REJECTIONS = 10;
/** Number of implement steps taken so far. */
function implementRounds(steps: { role: string }[]): number {
  let rounds = 0;
  for (const step of steps) {
    if (step.role === "implement") {
      rounds += 1;
    }
  }
  return rounds;
}
/**
 * Count every gate step that rejected the work: unapproved reviews, failed
 * tests, failed commits, and failed publishes. Other roles never count.
 */
function totalRejections(steps: { role: string; meta: unknown }[]): number {
  // Map each gate role to the boolean meta flag that signals success.
  const gateFlag: Record<string, string> = {
    review: "approved",
    test: "passed",
    committer: "committed",
    publish: "success",
  };
  let rejections = 0;
  for (const step of steps) {
    const flag = gateFlag[step.role];
    if (flag !== undefined && !(step.meta as Record<string, boolean>)[flag]) {
      rejections += 1;
    }
  }
  return rejections;
}
/** True while both retry budgets (implement rounds, total rejections) have headroom. */
function canRetryImplement(steps: { role: string; meta: unknown }[]): boolean {
  if (implementRounds(steps) >= MAX_IMPLEMENT_ROUNDS) {
    return false;
  }
  return totalRejections(steps) < MAX_TOTAL_REJECTIONS;
}
/**
 * Route the solve-issue pipeline:
 * read-issue -> prepare -> plan -> implement -> committer -> review -> test -> publish.
 * Early stages end the workflow when not ready; later gates send work back to
 * implement while the retry budget (rounds + rejections) lasts.
 */
export const moderator: Moderator<WorkflowMeta> = (context) => {
  if (context.steps.length === 0) {
    return "read-issue";
  }
  const last = context.steps[context.steps.length - 1];
  // Common fallback for every gate rejection.
  const retryOrEnd = () => (canRetryImplement(context.steps) ? "implement" : END);
  switch (last.role) {
    case "read-issue":
      return last.meta.ready ? "prepare" : END;
    case "prepare":
      return last.meta.ready ? "plan" : END;
    case "plan":
      return last.meta.ready ? "implement" : END;
    case "implement":
      return last.meta.done ? "committer" : retryOrEnd();
    case "committer":
      return last.meta.committed ? "review" : retryOrEnd();
    case "review":
      return last.meta.approved ? "test" : retryOrEnd();
    case "test":
      return last.meta.passed ? "publish" : retryOrEnd();
    case "publish":
      return last.meta.success ? END : retryOrEnd();
    default:
      return END;
  }
};

View File

@ -0,0 +1,57 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, decorateRole, withDryRun, onFail } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Prompt for the committer agent: starting from the dirty default branch the
 * implement step left behind, create a feature branch, write a conventional
 * commit, and push. The agent must end with a `{ "committed": bool }` JSON
 * line that the extractor reads into CommitterMeta.
 */
function committerPrompt({ threadId }: { threadId: string }): string {
  return `You are the committer agent. The **implement** step finished with a passing build; your job is to branch, commit, and push.
1. Read the workflow thread: \`nerve thread show ${threadId}\` — understand what was planned, implemented, and reviewed.
2. In the thread, locate \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\`. From them you need issue **number**, **title** (for the branch slug), repo **path**, and **defaultBranch**.
3. \`cd\` to the repo **path** from the markers. Optionally read \`CONVENTIONS.md\` in that repo root if present.
4. Run \`git rev-parse --abbrev-ref HEAD\` and compare with **defaultBranch** from the markers. Implement leaves changes uncommitted on the default branch — you should be on that branch with a dirty working tree. If you are not on the default branch, or the tree is clean when you expected changes, set **committed** to false and explain.
5. Run \`git status\`. If there is nothing to commit, set **committed** to false and explain.
6. Create a feature branch (do not commit directly on the default branch if it would mix unrelated work):
- Name: \`fix/<number>-<short-slug>\` for fixes, or \`feat/<number>-<short-slug>\` if the issue is clearly a feature.
- **slug**: lowercase, hyphens only, short (from issue title words).
- Example: \`git checkout -b fix/42-auth-timeout\`
7. \`git add -A\`
8. Write a **conventional commit** message describing what changed and why, using the thread context.
9. \`git commit -m "<message>"\` — do NOT pass \`--author\`, use repo git config.
10. \`git push -u origin <branch-name>\`
**committed=true** only if branch was created, commit succeeded, and **push** succeeded.
End your reply with a JSON line:
\`\`\`json
{ "committed": true }
\`\`\`
or
\`\`\`json
{ "committed": false }
\`\`\``;
}
// Schema the LLM extractor applies to the committer agent's final JSON line.
export const committerMetaSchema = z.object({
  committed: z
    .boolean()
    .describe("true if branch created, changes committed, and pushed successfully"),
});
// Meta type derived from the schema; carried on the committer's workflow step.
export type CommitterMeta = z.infer<typeof committerMetaSchema>;
/**
 * Build the committer role: prompt the agent, extract { committed } via the
 * schema, then wrap with dry-run (pretend success without side effects) and
 * on-fail (degrade to committed=false) decorators.
 */
export function createCommitterRole(
  adapter: AgentFn,
  extract: LlmExtractorConfig,
): Role<CommitterMeta> {
  const promptFor = async (ctx: ThreadContext) =>
    committerPrompt({ threadId: ctx.start.meta.threadId });
  const base = createRole(adapter, promptFor, committerMetaSchema, extract);
  const decorators = [
    withDryRun({ label: "committer", meta: { committed: true } as CommitterMeta }),
    onFail({ label: "committer", meta: { committed: false } as CommitterMeta }),
  ];
  return decorateRole(base, decorators) as Role<CommitterMeta>;
}

View File

@ -0,0 +1,86 @@
import type { AgentFn, Role, RoleResult, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveRepoCwd } from "../lib/repo-context.js";
/**
 * Prompt for the implement agent: apply the planned changes in the repo cwd,
 * keep the build green, and never touch git — branching/commits belong to the
 * committer step. The trailing JSON line carries the { done } meta signal.
 */
function buildImplementPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are the **implement** agent. You apply code changes for the issue.
Read workflow context (plan, reviewer/test feedback): \`nerve thread show ${threadId}\`
Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
Your cwd is the target repository.
## Requirements
1. Implement the planned changes; address reviewer/tester feedback from the thread if any.
2. Run the project **build** (\`pnpm build\`, \`npm run build\`, etc.) and fix issues until build passes.
3. Multi-step: if you cannot finish this round, explain why and set **done** to false.
Do **not** run \`git checkout -b\`, \`git add\`, \`git commit\`, or \`git push\`. **Never** create commits on any branch — branching and commits are handled by the **committer** step after you finish.
Then close with JSON:
\`\`\`json
{ "done": true }
\`\`\`
or \`{ "done": false }\` matching whether implementation is complete.
**done=true** only when changes are complete **and** build passes in this round.`;
}
// Schema applied to the implement agent's closing JSON line.
export const implementMetaSchema = z.object({
  done: z.boolean().describe("true when changes are complete and build passes this round"),
});
// Meta type carried on the implement workflow step.
export type ImplementMeta = z.infer<typeof implementMetaSchema>;
// Dependencies for the implement role factory.
export type CreateImplementRoleDeps = {
  // LLM extractor used to parse the { done } signal.
  extract: LlmExtractorConfig;
  // Workspace root, interpolated into the prompt for CONVENTIONS.md.
  nerveRoot: string;
};
/**
 * Build the implement role. Each invocation resolves the target repo path from
 * thread markers, then runs the inner prompt/extraction role with `workdir`
 * pointed at that repo. Any failure degrades to { done: false } so the
 * moderator can decide whether to retry.
 */
export function createImplementRole(
  adapter: AgentFn,
  { extract, nerveRoot }: CreateImplementRoleDeps,
): Role<ImplementMeta> {
  // Hoisted out of the returned closure: the inner role does not depend on the
  // per-invocation context, so there is no need to rebuild it on every call.
  const innerRole = createRole(
    adapter,
    async (innerCtx: ThreadContext) =>
      buildImplementPrompt({
        threadId: innerCtx.start.meta.threadId,
        nerveRoot,
      }),
    implementMetaSchema,
    extract,
  );
  return async (ctx: ThreadContext): Promise<RoleResult<ImplementMeta>> => {
    const messages = ctx.steps as unknown as WorkflowMessage[];
    const cwd = resolveRepoCwd(messages);
    if (cwd === null) {
      // No repo path means nowhere to apply changes; fail softly.
      return {
        content: "implement cannot run: missing repo path in thread markers",
        meta: { done: false },
      };
    }
    // Inject the resolved repo path as `workdir` on the start meta.
    const innerCtx: ThreadContext = {
      ...ctx,
      start: {
        ...ctx.start,
        meta: { ...ctx.start.meta, workdir: cwd },
      },
    };
    try {
      return await innerRole(innerCtx);
    } catch (e) {
      const msg = e instanceof Error ? e.message : String(e);
      return {
        content: `implement failed: ${msg}`,
        meta: { done: false },
      };
    }
  };
}

View File

@ -0,0 +1,88 @@
import type { AgentFn, Role, RoleResult, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveRepoCwd } from "../lib/repo-context.js";
/**
 * Builds the instruction prompt for the "plan" agent (analysis only).
 *
 * @param threadId - Workflow thread id, interpolated into the
 *   `nerve thread show` command the agent is told to run.
 * @param nerveRoot - Nerve workspace root; the agent reads CONVENTIONS.md
 *   from there.
 * @returns The full markdown prompt, ending with the JSON meta-signal
 *   contract (`{ "ready": true | false }`).
 */
function buildPlanPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  // NOTE: template-literal lines are intentionally unindented — leading
  // whitespace would become part of the prompt string.
  return `You are the **plan** agent (analysis only — ask mode). You produce an implementation plan for fixing the issue.
Read workflow context: \`nerve thread show ${threadId}\`
Read Nerve workspace conventions (coding rules for agents): \`cat ${nerveRoot}/CONVENTIONS.md\`
In the **target repository** (your cwd), skim relevant files and read \`CONVENTIONS.md\` **if it exists** there.
## Output
Write an implementation plan in **markdown** with:
1. Problem understanding
2. Change strategy
3. Target files (paths)
4. **Test commands** to run (explicit shell commands, e.g. \`pnpm test\`, \`pnpm vitest run\`)
5. Risks
End your reply with a JSON code block (meta signal):
\`\`\`json
{ "ready": true }
\`\`\`
Use \`{ "ready": false }\` if the plan cannot be made actionable.
**ready=true** only when the plan is clear and actionable.`;
}
// Meta signal emitted by the plan role: is the plan actionable?
const planReadyField = z
  .boolean()
  .describe("true if plan is clear and actionable");
export const planMetaSchema = z.object({ ready: planReadyField });
export type PlanMeta = z.infer<typeof planMetaSchema>;
/** Dependencies for {@link createPlanRole}. */
export type CreatePlanRoleDeps = {
// Extractor configuration forwarded to createRole to pull the JSON meta signal.
extract: LlmExtractorConfig;
// Nerve workspace root; interpolated into the plan prompt.
nerveRoot: string;
};
/**
 * Creates the "plan" workflow role.
 *
 * Resolves the target-repo working directory from thread markers, then
 * delegates to an inner LLM role that runs the plan prompt with `workdir`
 * pointed at that repo. Failures (missing repo markers, or an exception
 * from the inner role) are reported as role output with `ready: false`
 * instead of crashing the workflow.
 *
 * @param adapter - Agent function used to run the inner role.
 * @param deps - Extractor config and Nerve workspace root.
 * @returns A Role producing {@link PlanMeta}.
 */
export function createPlanRole(
  adapter: AgentFn,
  { extract, nerveRoot }: CreatePlanRoleDeps,
): Role<PlanMeta> {
  // Build the inner role once: it does not depend on the per-call ctx, so
  // there is no need to re-create it on every invocation (this also matches
  // createPublishRole's structure).
  const innerRole = createRole(
    adapter,
    async (innerCtx: ThreadContext) =>
      buildPlanPrompt({
        threadId: innerCtx.start.meta.threadId,
        nerveRoot,
      }),
    planMetaSchema,
    extract,
  );
  return async (ctx: ThreadContext): Promise<RoleResult<PlanMeta>> => {
    const messages = ctx.steps as unknown as WorkflowMessage[];
    const cwd = resolveRepoCwd(messages);
    if (cwd === null) {
      // Earlier steps did not leave repo markers in the thread, so there is
      // no checkout to plan against.
      return {
        content: "plan cannot run: missing ---SOLVE_ISSUE_REPO--- or ---SOLVE_ISSUE_PARSE--- in thread",
        meta: { ready: false },
      };
    }
    // Run the inner role with the repo checkout as the working directory.
    const innerCtx: ThreadContext = {
      ...ctx,
      start: {
        ...ctx.start,
        meta: { ...ctx.start.meta, workdir: cwd },
      },
    };
    try {
      return await innerRole(innerCtx);
    } catch (e) {
      // Surface the failure as role output so the workflow can react.
      const msg = e instanceof Error ? e.message : String(e);
      return {
        content: `plan failed: ${msg}`,
        meta: { ready: false },
      };
    }
  };
}

View File

@ -0,0 +1,73 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Builds the instruction prompt for the "prepare" agent, which clones or
 * refreshes the target repository and verifies the install/build baseline.
 *
 * @param threadId - Workflow thread id, interpolated into the
 *   `nerve thread show` command the agent is told to run.
 * @returns The full markdown prompt. It instructs the agent to emit the
 *   ---SOLVE_ISSUE_REPO--- marker block and ends with the JSON meta-signal
 *   contract (`{ "ready": true | false }`).
 */
function preparePrompt({ threadId }: { threadId: string }): string {
  // NOTE: template-literal lines are intentionally unindented — leading
  // whitespace would become part of the prompt string.
  return `You are the **prepare** agent. You ensure the target repository is ready for work.
Read prior messages / thread for issue markers: \`nerve thread show ${threadId}\`
## Goal
Find **owner**, **repo**, and **host** from \`---SOLVE_ISSUE_PARSE---\` in the thread (from read-issue).
Check the **initial user prompt** (the trigger message) for a local repo path. The user may specify it like:
- \`--repo /path/to/repo\`
- \`repo: /path/to/repo\`
- or just mention an absolute path to the local clone
## Steps
### If a local path is provided in the trigger prompt:
1. Verify \`<path>/.git\` exists — if not, fail with \`ready: false\`
2. \`cd "<path>" && git fetch --all\`
3. Ensure working tree clean: if \`git status --porcelain\` is non-empty, \`git stash push -u -m "solve-issue stash"\`
4. Detect default branch (\`main\` or \`master\`) and \`git checkout <default> && git pull --ff-only\`
5. Use this path as REPOPATH
### If no local path is provided:
1. Let \`REPOPATH=$HOME/Code/<owner>/<repo>\` (expand \`$HOME\`)
2. \`mkdir -p "$HOME/Code/<owner>"\`
3. If \`REPOPATH/.git\` is missing: \`git clone https://<host>/<owner>/<repo>.git "$REPOPATH"\`
Else: \`cd "$REPOPATH" && git fetch --all && git pull --ff-only\`
4. Ensure working tree clean: if \`git status --porcelain\` is non-empty, \`git stash push -u -m "solve-issue stash"\`
5. Detect default branch and \`git checkout <default>\`
### Then (both paths):
6. Detect package manager: \`pnpm-lock.yaml\` → pnpm, \`yarn.lock\` → yarn, \`package-lock.json\` → npm; run install (\`pnpm install --no-frozen-lockfile\` / \`npm ci\` or \`npm install\` / \`yarn\`).
7. If \`package.json\` has a \`build\` script, run the build (\`pnpm build\`, etc.) and fix nothing — only verify baseline passes.
## Required marker block
Emit **exactly**:
\`\`\`
---SOLVE_ISSUE_REPO---
path: <absolute path to REPOPATH>
defaultBranch: <main or master>
packageManager: <pnpm|npm|yarn>
---
\`\`\`
End with:
\`\`\`json
{ "ready": true }
\`\`\`
or \`{ "ready": false }\` if the repo is invalid, or install/build baseline failed.
**ready=true** only when the repo exists at \`path\`, is clean, dependencies installed, and baseline build succeeded (or no build script).`;
}
// Meta signal emitted by the prepare role: repo checked out and baseline ok?
const prepareReadyField = z
  .boolean()
  .describe("true if repo is ready and baseline build ok");
export const prepareMetaSchema = z.object({ ready: prepareReadyField });
export type PrepareMeta = z.infer<typeof prepareMetaSchema>;
/**
 * Creates the "prepare" workflow role: a plain LLM role that runs the
 * prepare prompt and extracts {@link PrepareMeta} from the reply.
 */
export function createPrepareRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<PrepareMeta> {
  const promptFor = async (ctx: ThreadContext): Promise<string> => {
    const threadId = ctx.start.meta.threadId;
    return preparePrompt({ threadId });
  };
  return createRole(adapter, promptFor, prepareMetaSchema, extract);
}

View File

@ -0,0 +1,110 @@
import { mkdirSync, writeFileSync } from "node:fs";
import { join } from "node:path";
import type { AgentFn, Role, RoleResult, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, isDryRun } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Builds the instruction prompt for the "publish" agent, which pushes the
 * branch (if needed) and opens a pull request via the `tea` CLI.
 *
 * @param threadId - Workflow thread id, interpolated into the
 *   `nerve thread show` command the agent is told to run.
 * @param nerveRoot - Nerve workspace root; the agent may read
 *   CONVENTIONS.md from there.
 * @returns The full markdown prompt, ending with the JSON meta-signal
 *   contract (`{ "success": true | false }`).
 */
function buildPublishPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  // NOTE: template-literal lines are intentionally unindented — leading
  // whitespace would become part of the prompt string.
  return `You are the **publish** agent (Hermes). Test has passed. Open a pull request for the current branch using the **tea** CLI.
## Context
- Read the full workflow thread: \`nerve thread show ${threadId}\`
- Nerve workspace conventions (for tone/consistency, optional): \`cat ${nerveRoot}/CONVENTIONS.md\`
## Repo and issue (from the thread)
Find \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\` in prior messages. You need:
- \`path\` — clone checkout directory (this is your working copy)
- \`host\`, \`owner\`, \`repo\`, \`number\` for the issue
- \`defaultBranch\` (for PR base) from SOLVE_ISSUE_REPO
**Issue link** for the Ref section: \`https://<host>/<owner>/<repo>/issues/<number>\`
## Steps (in order)
1. \`cd\` to the **repo \`path\`**. Run \`git rev-parse --abbrev-ref HEAD\` to get the current branch name. The **committer** step should already have pushed this branch; run \`git push -u origin <that-branch>\` only if the branch is not yet on the remote.
2. Choose a **PR title** that reflects the real change (not a generic \`fix: issue #N\`): derive it from the issue title, plan, and thread summary (keep it concise; Conventional Commits style is fine, e.g. \`fix(auth): handle session expiry\`).
3. Write a **PR body** in Markdown with exactly these sections, in this order, each with a \`##\` heading (fill with concise content based on the thread: plan, implement, review, test):
- **## What** one short paragraph: what this PR does
- **## Why** one short paragraph: motivation / issue
- **## Changes** bullet list of notable changes
- **## Ref** include one line \`Fixes #<number>\` (same \`number\` from SOLVE_ISSUE_PARSE; closes/links the issue where supported) **and** the issue URL \`https://<host>/<owner>/<repo>/issues/<number>\`
4. Create the PR with **tea** (not curl/fetch to Gitea):
- \`tea pr create --repo <owner>/<repo> --base <defaultBranch> --head <branch> --title "<your meaningful title>" --body <your markdown body>\`
- You may use a heredoc or a temp file for \`--body\` if the shell requires it; keep the four sections in the body.
5. Confirm the PR was created (tea prints a URL or PR number in typical setups).
**success=true** only if both **push** and **tea** PR creation succeed. If any step fails, set **success=false** and say why.
End your reply with a JSON line:
\`\`\`json
{ "success": true }
\`\`\`
or
\`\`\`json
{ "success": false }
\`\`\``;
}
// Meta signal emitted by the publish role: did push + PR creation succeed?
const publishSuccessField = z
  .boolean()
  .describe("true if git push and tea pr create both succeeded");
export const publishMetaSchema = z.object({ success: publishSuccessField });
export type PublishMeta = z.infer<typeof publishMetaSchema>;
/** Dependencies for {@link createPublishRole}. */
export type CreatePublishRoleDeps = {
// Extractor configuration forwarded to createRole to pull the JSON meta signal.
extract: LlmExtractorConfig;
// Nerve workspace root; used for the prompt, the workdir, and the log path.
nerveRoot: string;
};
// Unique per-run log file for the publish step, under <nerveRoot>/logs.
function logPath(nerveRoot: string): string {
  const fileName = `solve-issue-publish-${Date.now()}.log`;
  return join(nerveRoot, "logs", fileName);
}
/**
 * Creates the "publish" workflow role.
 *
 * Delegates to an inner LLM role that runs the publish prompt with
 * `workdir` set to the Nerve root. On dry runs the push/PR work is skipped
 * entirely and success is reported. Inner-role exceptions are written to a
 * per-run log file and reported as role output with `success: false`.
 *
 * @param adapter - Agent function used to run the inner role.
 * @param deps - Extractor config and Nerve workspace root.
 * @returns A Role producing {@link PublishMeta}.
 */
export function createPublishRole(
  adapter: AgentFn,
  { extract, nerveRoot }: CreatePublishRoleDeps,
): Role<PublishMeta> {
  const innerRole = createRole(
    adapter,
    async (ctx: ThreadContext) =>
      buildPublishPrompt({ threadId: ctx.start.meta.threadId, nerveRoot }),
    publishMetaSchema,
    extract,
  );
  return async (ctx: ThreadContext): Promise<RoleResult<PublishMeta>> => {
    const file = logPath(nerveRoot);
    // Ensure the logs directory exists; name it directly instead of
    // resolving ".." against the log-file path.
    mkdirSync(join(nerveRoot, "logs"), { recursive: true });
    if (isDryRun(ctx.start)) {
      // Dry runs must not push branches or open PRs; record the skip and
      // report success so the workflow can complete.
      const msg = "[dry-run] publish skipped (no git push / PR)";
      writeFileSync(file, `${msg}\n`, "utf-8");
      return {
        content: `[dry-run] publish skipped — log: ${file}`,
        meta: { success: true },
      };
    }
    const innerCtx: ThreadContext = {
      ...ctx,
      start: {
        ...ctx.start,
        meta: { ...ctx.start.meta, workdir: nerveRoot },
      },
    };
    try {
      return await innerRole(innerCtx);
    } catch (e) {
      // Persist the failure to the log file, then surface it as role output.
      const msg = e instanceof Error ? e.message : String(e);
      const body = `publish failed: ${msg}\n`;
      writeFileSync(file, body, "utf-8");
      return {
        content: `publish failed: ${msg}\nLog: ${file}`,
        meta: { success: false },
      };
    }
  };
}

View File

@ -0,0 +1,53 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Builds the instruction prompt for the "read-issue" agent, which fetches
 * the Gitea issue via the `tea` CLI and emits the ---SOLVE_ISSUE_PARSE---
 * marker block consumed by later steps.
 *
 * @param threadId - Workflow thread id, interpolated into the
 *   `nerve thread show` command the agent is told to run.
 * @returns The full markdown prompt, ending with the JSON meta-signal
 *   contract (`{ "ready": true | false }`).
 */
function readIssuePrompt({ threadId }: { threadId: string }): string {
  // NOTE: template-literal lines are intentionally unindented — leading
  // whitespace would become part of the prompt string.
  return `You are the **read-issue** agent. You fetch Gitea issue content via the \`tea\` CLI.
Read the workflow thread start prompt for the issue URL (same run): \`nerve thread show ${threadId}\`
## Steps
1. From the **initial user prompt** (issue URL), extract **host**, **owner**, **repo**, and **issue number**. Supported shape:
\`https://<host>/<owner>/<repo>/issues/<number>\`
2. Run:
\`tea issue show <number> --repo <owner>/<repo> --comments\`
(Add \`--json\` if helpful for parsing.)
3. In your reply, include **structured issue text**: title, body, labels, and each comment (author + body + time).
4. You **must** emit this marker block **exactly** (fill in real values):
\`\`\`
---SOLVE_ISSUE_PARSE---
host: <host>
owner: <owner>
repo: <repo>
number: <number>
---
\`\`\`
5. End with JSON meta (verbatim block):
\`\`\`json
{ "ready": true }
\`\`\`
Use \`{ "ready": false }\` if you could not fetch or parse the issue.
**ready=true** only if the issue was fetched successfully and the marker block is correct.`;
}
// Meta signal emitted by the read-issue role: issue fetched and markers present?
const readIssueReadyField = z
  .boolean()
  .describe("true if issue content was fetched and markers are present");
export const readIssueMetaSchema = z.object({ ready: readIssueReadyField });
export type ReadIssueMeta = z.infer<typeof readIssueMetaSchema>;
/**
 * Creates the "read-issue" workflow role: a plain LLM role that runs the
 * read-issue prompt and extracts {@link ReadIssueMeta} from the reply.
 */
export function createReadIssueRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<ReadIssueMeta> {
  const promptFor = async (ctx: ThreadContext): Promise<string> => {
    const threadId = ctx.start.meta.threadId;
    return readIssuePrompt({ threadId });
  };
  return createRole(adapter, promptFor, readIssueMetaSchema, extract);
}

View File

@ -0,0 +1,59 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Builds the instruction prompt for the code-review agent, which inspects
 * the working-tree diff between the implement and test steps.
 *
 * @param threadId - Workflow thread id, interpolated into the
 *   `nerve thread show` command the agent is told to run.
 * @param nerveRoot - Nerve workspace root; the agent reads CONVENTIONS.md
 *   from there.
 * @returns The full markdown prompt, ending with the JSON meta-signal
 *   contract (`{ "approved": true | false }`).
 */
function reviewPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  // NOTE: template-literal lines are intentionally unindented — leading
  // whitespace would become part of the prompt string.
  return `You are a **code reviewer** (Hermes). You run after implement and before test.
Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
Read workflow context: \`nerve thread show ${threadId}\`
Find **repo path** from \`---SOLVE_ISSUE_REPO--- path:\` in the thread (prepare step). \`cd\` there before any git commands.
## Static analysis
Run:
1. \`cd <repo-path> && git diff --stat\`
2. \`cd <repo-path> && git diff\`
3. \`cd <repo-path> && git status --short\`
## Checklist
Reject (**approved: false**) if you find:
- Garbage files, secrets/credentials, unrelated changes
- Violations of CONVENTIONS.md (e.g. \`interface\` vs \`type\`, dynamic \`import()\`)
Approve (**approved: true**) if the diff is clean and focused.
End with:
\`\`\`json
{ "approved": true }
\`\`\`
or
\`\`\`json
{ "approved": false }
\`\`\``;
}
// Meta signal emitted by the review role: is the diff ready for tests?
const reviewApprovedField = z
  .boolean()
  .describe("true if diff is clean and ready for tests");
export const reviewMetaSchema = z.object({ approved: reviewApprovedField });
export type ReviewMeta = z.infer<typeof reviewMetaSchema>;
/**
 * Creates the code-review workflow role: a plain LLM role that runs the
 * review prompt and extracts {@link ReviewMeta} from the reply.
 *
 * NOTE(review): unlike the other factories this takes positional args, not
 * a deps object — kept as-is for caller compatibility.
 */
export function createReviewRole(
  adapter: AgentFn,
  extract: LlmExtractorConfig,
  nerveRoot: string,
): Role<ReviewMeta> {
  const promptFor = async (ctx: ThreadContext): Promise<string> => {
    const threadId = ctx.start.meta.threadId;
    return reviewPrompt({ threadId, nerveRoot });
  };
  return createRole(adapter, promptFor, reviewMetaSchema, extract);
}

View File

@ -0,0 +1,40 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Builds the instruction prompt for the "test" agent, which runs the test
 * commands listed by the plan step inside the repo checkout.
 *
 * @param threadId - Workflow thread id, interpolated into the
 *   `nerve thread show` command the agent is told to run.
 * @returns The full markdown prompt, ending with the JSON meta-signal
 *   contract (`{ "passed": true | false }`).
 */
function testPrompt({ threadId }: { threadId: string }): string {
  // NOTE: template-literal lines are intentionally unindented — leading
  // whitespace would become part of the prompt string.
  return `You are the **test** agent (Hermes). You execute automated tests for the change.
Read workflow context: \`nerve thread show ${threadId}\`
Find **repo path** from \`---SOLVE_ISSUE_REPO--- path:\` in the thread.
From the **plan** step output, locate **Test commands** (explicit shell commands). Run each command with cwd = repo path, in order.
If the plan lists **no** test commands, try **pnpm test**, then **npm test** if pnpm is unavailable; if neither applies, explain skip.
Collect stdout/stderr snippets on failure.
End with JSON only:
\`\`\`json
{ "passed": true }
\`\`\`
or \`{ "passed": false }\`
**passed=true** only if every executed command exited 0 (or skip was justified with no failing command).`;
}
// Meta signal emitted by the test role: did every executed command pass?
const testPassedField = z
  .boolean()
  .describe("true if all test commands passed");
export const testMetaSchema = z.object({ passed: testPassedField });
export type TestMeta = z.infer<typeof testMetaSchema>;
/**
 * Creates the "test" workflow role: a plain LLM role that runs the test
 * prompt and extracts {@link TestMeta} from the reply.
 */
export function createTestRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<TestMeta> {
  const promptFor = async (ctx: ThreadContext): Promise<string> => {
    const threadId = ctx.start.meta.threadId;
    return testPrompt({ threadId });
  };
  return createRole(adapter, promptFor, testMetaSchema, extract);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,22 +0,0 @@
{
"name": "workflow-generator-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"dependencies": {
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"typescript": "^5.7.0"
},
"pnpm": {
"overrides": {
"@uncaged/nerve-daemon": "link:../../../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../../../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../../../repos/nerve/packages/workflow-utils"
}
}
}

View File

@ -1,59 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
overrides:
'@uncaged/nerve-daemon': link:../../../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../../../repos/nerve/packages/workflow-utils
importers:
.:
dependencies:
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
typescript:
specifier: ^5.7.0
version: 5.9.3
packages:
'@types/node@22.19.17':
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
typescript@5.9.3:
resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==}
engines: {node: '>=14.17'}
hasBin: true
undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
zod@4.3.6:
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
snapshots:
'@types/node@22.19.17':
dependencies:
undici-types: 6.21.0
typescript@5.9.3: {}
undici-types@6.21.0: {}
zod@4.3.6: {}

View File

@ -1,13 +0,0 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"types": ["node"]
},
"include": ["./**/*.ts"]
}