Compare commits

master..fix/issue-7-rename-generator-to-workflow

No commits in common. "master" and "fix/issue-7-rename-generator-to-workflow" have entirely different histories.

98 changed files with 2798 additions and 1246 deletions

1
.gitignore vendored
View File

@ -5,4 +5,3 @@ nerve.pid
nerve.sock
false/
*.db
dist/

View File

@ -78,8 +78,7 @@ workflows/<name>/
| `createCursorRole` | Needs file system access (code generation, planning) | planner, coder |
| `createHermesRole` | Needs shell + tools (testing, reviewing) | tester, reviewer |
| `createLlmRole` | Pure LLM reasoning, no tools | analysis roles |
| `createRole(hermesAdapter, …)` | Agent role with LLM + shell (branch/commit/push from thread context) | solve-issue committer, publish |
| Direct `Role<Meta>` | No LLM needed, scripted logic | thin wrappers only |
| Direct `Role<Meta>` | No LLM needed, scripted logic | committer |
### Meta Convention
@ -88,8 +87,7 @@ Meta is a **routing signal only** — one boolean per role:
- `{ done: boolean }` — coder
- `{ approved: boolean }` — reviewer
- `{ passed: boolean }` — tester
- `{ committed: boolean }` — committer (solve-issue: branch created, pushed)
- `{ success: boolean }` — publish (PR opened)
- `{ success: boolean }` — committer
### Standard Flow

View File

@ -5,11 +5,31 @@ extract:
model: qwen-plus
senses:
linux-system-health:
group: system
interval: 30s
throttle: 10s
timeout: 15s
hermes-gateway-health:
group: system
interval: 2m
throttle: 30s
timeout: 30s
hermes-session-message-stats:
group: hermes
interval: 15m
throttle: 30s
timeout: 60s
worker-process-metrics:
group: system
interval: 1m
throttle: 15s
timeout: 5s
git-workspace-status:
group: workspace
interval: 2m
throttle: 30s
timeout: 15s
workflows:
develop-sense:
@ -21,6 +41,3 @@ workflows:
solve-issue:
concurrency: 1
overflow: queue
extract-knowledge:
concurrency: 1
overflow: queue

View File

@ -4,25 +4,19 @@
"private": true,
"type": "module",
"scripts": {
"build": "node scripts/build.mjs"
"build": "pnpm -r build"
},
"dependencies": {
"@uncaged/nerve-adapter-cursor": "link:../repos/nerve/packages/adapter-cursor",
"@uncaged/nerve-adapter-hermes": "link:../repos/nerve/packages/adapter-hermes",
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-daemon": "link:../repos/nerve/packages/daemon",
"@uncaged/nerve-role-committer": "link:../repos/nerve/packages/role-committer",
"@uncaged/nerve-role-reviewer": "link:../repos/nerve/packages/role-reviewer",
"@uncaged/nerve-workflow-meta": "link:../repos/nerve/packages/workflow-meta",
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils",
"drizzle-orm": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"drizzle-kit": "latest",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
"drizzle-kit": "latest"
},
"pnpm": {
"onlyBuiltDependencies": [
@ -33,9 +27,7 @@
"@uncaged/nerve-adapter-hermes": "link:../repos/nerve/packages/adapter-hermes",
"@uncaged/nerve-daemon": "link:../repos/nerve/packages/daemon",
"@uncaged/nerve-core": "link:../repos/nerve/packages/core",
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils",
"@uncaged/nerve-role-committer": "link:../repos/nerve/packages/role-committer",
"@uncaged/nerve-workflow-meta": "link:../repos/nerve/packages/workflow-meta"
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils"
}
}
}

54
pnpm-lock.yaml generated
View File

@ -10,8 +10,6 @@ overrides:
'@uncaged/nerve-daemon': link:../repos/nerve/packages/daemon
'@uncaged/nerve-core': link:../repos/nerve/packages/core
'@uncaged/nerve-workflow-utils': link:../repos/nerve/packages/workflow-utils
'@uncaged/nerve-role-committer': link:../repos/nerve/packages/role-committer
'@uncaged/nerve-workflow-meta': link:../repos/nerve/packages/workflow-meta
importers:
@ -29,15 +27,6 @@ importers:
'@uncaged/nerve-daemon':
specifier: link:../repos/nerve/packages/daemon
version: link:../repos/nerve/packages/daemon
'@uncaged/nerve-role-committer':
specifier: link:../repos/nerve/packages/role-committer
version: link:../repos/nerve/packages/role-committer
'@uncaged/nerve-role-reviewer':
specifier: link:../repos/nerve/packages/role-reviewer
version: link:../repos/nerve/packages/role-reviewer
'@uncaged/nerve-workflow-meta':
specifier: link:../repos/nerve/packages/workflow-meta
version: link:../repos/nerve/packages/workflow-meta
'@uncaged/nerve-workflow-utils':
specifier: link:../repos/nerve/packages/workflow-utils
version: link:../repos/nerve/packages/workflow-utils
@ -48,18 +37,9 @@ importers:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
drizzle-kit:
specifier: latest
version: 0.31.10
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
senses/git-workspace-status:
devDependencies:
@ -132,9 +112,6 @@ importers:
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-meta':
specifier: link:../../../repos/nerve/packages/workflow-meta
version: link:../../../repos/nerve/packages/workflow-meta
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
@ -153,37 +130,6 @@ importers:
version: 5.9.3
workflows/develop-workflow:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../../../repos/nerve/packages/adapter-cursor
version: link:../../../repos/nerve/packages/adapter-cursor
'@uncaged/nerve-adapter-hermes':
specifier: link:../../../repos/nerve/packages/adapter-hermes
version: link:../../../repos/nerve/packages/adapter-hermes
'@uncaged/nerve-core':
specifier: link:../../../repos/nerve/packages/core
version: link:../../../repos/nerve/packages/core
'@uncaged/nerve-workflow-meta':
specifier: link:../../../repos/nerve/packages/workflow-meta
version: link:../../../repos/nerve/packages/workflow-meta
'@uncaged/nerve-workflow-utils':
specifier: link:../../../repos/nerve/packages/workflow-utils
version: link:../../../repos/nerve/packages/workflow-utils
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@types/node':
specifier: ^22.0.0
version: 22.19.17
esbuild:
specifier: ^0.27.0
version: 0.27.7
typescript:
specifier: ^5.7.0
version: 5.9.3
workflows/extract-knowledge:
dependencies:
'@uncaged/nerve-adapter-cursor':
specifier: link:../../../repos/nerve/packages/adapter-cursor

3
pnpm-workspace.yaml Normal file
View File

@ -0,0 +1,3 @@
packages:
- "workflows/*"
- "senses/*"

View File

@ -1,46 +0,0 @@
import * as esbuild from "esbuild";
import fs from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
// Repo root: one directory above scripts/ (this file's location).
const root = path.join(path.dirname(fileURLToPath(import.meta.url)), "..");
// All bundles are emitted under dist/.
const dist = path.join(root, "dist");
// Shared esbuild options: node ESM bundles with bare imports kept external.
const opts = {
bundle: true,
platform: "node",
format: "esm",
packages: "external",
};
/**
 * List the immediate subdirectories of `dir`, skipping entries whose
 * name starts with "." or "_". Returns [] when `dir` does not exist.
 * Each result is `{ name, full }`, where `full` is the joined path.
 */
function listDirs(dir) {
  if (!fs.existsSync(dir)) return [];
  const found = [];
  for (const name of fs.readdirSync(dir)) {
    if (name.startsWith(".") || name.startsWith("_")) continue;
    const full = path.join(dir, name);
    if (fs.statSync(full).isDirectory()) {
      found.push({ name, full });
    }
  }
  return found;
}
/**
 * Build entry point: wipe dist/ and bundle every sense
 * (senses/<name>/src/index.ts) and workflow (workflows/<name>/index.ts)
 * into dist/<kind>/<name>/index.js using the shared esbuild options.
 */
async function main() {
  // Always start from a clean output tree.
  fs.rmSync(dist, { recursive: true, force: true });

  // Senses are built before workflows, mirroring the original order.
  const targets = [
    { kind: "senses", entryOf: (full) => path.join(full, "src", "index.ts") },
    { kind: "workflows", entryOf: (full) => path.join(full, "index.ts") },
  ];
  for (const { kind, entryOf } of targets) {
    for (const { name, full } of listDirs(path.join(root, kind))) {
      const entry = entryOf(full);
      // Directories without an entry file are silently skipped.
      if (!fs.existsSync(entry)) continue;
      const outfile = path.join(dist, kind, name, "index.js");
      fs.mkdirSync(path.dirname(outfile), { recursive: true });
      await esbuild.build({ ...opts, entryPoints: [entry], outfile });
    }
  }
}

await main();

View File

@ -0,0 +1,122 @@
// src/index.ts
import { execFileSync } from "node:child_process";
import { resolve } from "node:path";
// src/schema.ts
import { integer, sqliteTable, text } from "drizzle-orm/sqlite-core";
// Drizzle table for git workspace snapshots: one row per sense run,
// keyed by the capture timestamp (ms since epoch). `has_upstream`
// is a 0/1 INTEGER flag (SQLite has no boolean type).
var snapshots = sqliteTable("snapshots", {
ts: integer("ts").primaryKey(),
branch: text("branch").notNull(),
headShort: text("head_short").notNull(),
porcelainLines: integer("porcelain_lines").notNull(),
hasUpstream: integer("has_upstream").notNull(),
aheadCount: integer("ahead_count").notNull(),
behindCount: integer("behind_count").notNull(),
/** Empty string when the snapshot succeeded; otherwise a short error summary. */
gitError: text("git_error").notNull()
});
// src/index.ts
var GIT_TIMEOUT_MS = 15e3;
// Workspace to inspect: $GIT_WORKSPACE_ROOT when set (resolved to an
// absolute path), otherwise the sense's current working directory.
function workspaceRoot() {
const raw = process.env.GIT_WORKSPACE_ROOT;
return raw ? resolve(raw) : resolve(process.cwd());
}
// Normalize an unknown thrown value to a short message string.
function gitErrorMessage(err) {
if (err instanceof Error) {
const m = err.message.trim();
// 197 chars + "..." caps the stored message at exactly 200 chars.
return m.length > 200 ? `${m.slice(0, 197)}...` : m;
}
return String(err);
}
// Run `git <args>` in `cwd` and return stdout with trailing whitespace
// stripped. Throws (per execFileSync) on non-zero exit, missing binary,
// or when the subprocess exceeds GIT_TIMEOUT_MS.
function runGit(cwd, args) {
return execFileSync("git", args, {
cwd,
encoding: "utf8",
timeout: GIT_TIMEOUT_MS,
maxBuffer: 2 * 1024 * 1024
}).trimEnd();
}
// Count non-empty lines of `git status --porcelain` output,
// i.e. the number of changed/untracked paths.
function countPorcelainLines(output) {
if (!output) return 0;
return output.split("\n").filter((line) => line.length > 0).length;
}
// Sense entry point: snapshot the git state of workspaceRoot(), persist
// one row to `snapshots`, and return the same data as the sense signal.
// Never throws: git failures are summarized in `gitError` while the
// remaining fields keep their zero/empty defaults.
async function compute(db, _peers) {
const root = workspaceRoot();
const ts = Date.now();
// Defaults that remain if git fails part-way through.
let branch = "";
let headShort = "";
let porcelainLines = 0;
let hasUpstream = 0;
let aheadCount = 0;
let behindCount = 0;
let gitError = "";
try {
// Early exit when root is not inside a git work tree.
const inside = runGit(root, ["rev-parse", "--is-inside-work-tree"]).trim();
if (inside !== "true") {
gitError = "not a git work tree";
await db.insert(snapshots).values({
ts,
branch,
headShort,
porcelainLines,
hasUpstream,
aheadCount,
behindCount,
gitError
});
return {
workspaceRoot: root,
branch,
headShort,
porcelainLines,
hasUpstream: false,
aheadCount,
behindCount,
gitError
};
}
branch = runGit(root, ["rev-parse", "--abbrev-ref", "HEAD"]);
headShort = runGit(root, ["rev-parse", "--short", "HEAD"]);
// One porcelain line per changed/untracked path.
porcelainLines = countPorcelainLines(runGit(root, ["status", "--porcelain"]));
try {
// Throws when HEAD has no upstream; treated as "no upstream" below.
runGit(root, ["rev-parse", "--abbrev-ref", "@{upstream}"]);
hasUpstream = 1;
// rev-list --left-right --count prints "<ahead>\t<behind>".
const lb = runGit(root, ["rev-list", "--left-right", "--count", "HEAD...@{upstream}"]);
const parts = lb.split(/[\t\s]+/).filter(Boolean);
if (parts.length >= 2) {
aheadCount = Number.parseInt(parts[0], 10) || 0;
behindCount = Number.parseInt(parts[1], 10) || 0;
}
} catch {
hasUpstream = 0;
aheadCount = 0;
behindCount = 0;
}
} catch (e) {
gitError = gitErrorMessage(e);
}
await db.insert(snapshots).values({
ts,
branch,
headShort,
porcelainLines,
hasUpstream,
aheadCount,
behindCount,
gitError
});
return {
workspaceRoot: root,
branch,
headShort,
porcelainLines,
hasUpstream: hasUpstream === 1,
aheadCount,
behindCount,
// Signal reports undefined (field omitted) on success.
gitError: gitError || void 0
};
}
export {
compute
};

View File

@ -0,0 +1,13 @@
-- Migration: 0001_init
-- Creates the snapshots table for git-workspace-status sense.
-- One row per sense run; `ts` is the capture time in ms since epoch.
CREATE TABLE IF NOT EXISTS snapshots (
ts INTEGER PRIMARY KEY,
branch TEXT NOT NULL,
head_short TEXT NOT NULL,
porcelain_lines INTEGER NOT NULL, -- changed/untracked path count from `git status --porcelain`
has_upstream INTEGER NOT NULL, -- 0/1 flag (SQLite has no boolean type)
ahead_count INTEGER NOT NULL,
behind_count INTEGER NOT NULL,
git_error TEXT NOT NULL -- '' on success, short error summary otherwise
);

View File

@ -0,0 +1,14 @@
{
"name": "sense-git-workspace-status",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild src/index.ts --bundle --platform=node --format=esm --outfile=index.js --packages=external"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -0,0 +1,116 @@
import { execFileSync } from "node:child_process";
import { resolve } from "node:path";
import type { LibSQLDatabase } from "drizzle-orm/libsql";
import { snapshots } from "./schema.ts";
const GIT_TIMEOUT_MS = 15_000;
/** Workspace to inspect: $GIT_WORKSPACE_ROOT when set, else the CWD. */
function workspaceRoot(): string {
  const fromEnv = process.env.GIT_WORKSPACE_ROOT;
  if (fromEnv) return resolve(fromEnv);
  return resolve(process.cwd());
}
/**
 * Render an unknown thrown value as a short message string,
 * truncated to at most 200 characters ("..." suffix included).
 */
function gitErrorMessage(err: unknown): string {
  if (!(err instanceof Error)) return String(err);
  const message = err.message.trim();
  if (message.length <= 200) return message;
  return `${message.slice(0, 197)}...`;
}
/**
 * Run `git <args>` in `cwd` and return stdout with trailing whitespace
 * stripped. Throws (per execFileSync) on non-zero exit, missing binary,
 * or when the subprocess exceeds GIT_TIMEOUT_MS.
 */
function runGit(cwd: string, args: string[]): string {
return execFileSync("git", args, {
cwd,
encoding: "utf8",
timeout: GIT_TIMEOUT_MS,
maxBuffer: 2 * 1024 * 1024,
}).trimEnd();
}
/** Number of non-empty lines in `git status --porcelain` output. */
function countPorcelainLines(output: string): number {
  if (!output) return 0;
  let count = 0;
  for (const line of output.split("\n")) {
    if (line.length > 0) count += 1;
  }
  return count;
}
/**
 * Sense entry point: snapshot the git state of workspaceRoot(), persist
 * one row to `snapshots`, and return the same data as the sense signal.
 * Never throws: git failures are summarized in `gitError` while the
 * remaining fields keep their zero/empty defaults.
 */
export async function compute(db: LibSQLDatabase, _peers: unknown) {
const root = workspaceRoot();
const ts = Date.now();
// Defaults that remain if git fails part-way through.
let branch = "";
let headShort = "";
let porcelainLines = 0;
let hasUpstream = 0; // stored as 0/1 INTEGER (SQLite has no boolean)
let aheadCount = 0;
let behindCount = 0;
let gitError = "";
try {
// Early exit when root is not inside a git work tree.
const inside = runGit(root, ["rev-parse", "--is-inside-work-tree"]).trim();
if (inside !== "true") {
gitError = "not a git work tree";
await db.insert(snapshots).values({
ts,
branch,
headShort,
porcelainLines,
hasUpstream,
aheadCount,
behindCount,
gitError,
});
return {
workspaceRoot: root,
branch,
headShort,
porcelainLines,
hasUpstream: false,
aheadCount,
behindCount,
gitError,
};
}
branch = runGit(root, ["rev-parse", "--abbrev-ref", "HEAD"]);
headShort = runGit(root, ["rev-parse", "--short", "HEAD"]);
// One porcelain line per changed/untracked path.
porcelainLines = countPorcelainLines(runGit(root, ["status", "--porcelain"]));
try {
// Throws when HEAD has no upstream; treated as "no upstream" below.
runGit(root, ["rev-parse", "--abbrev-ref", "@{upstream}"]);
hasUpstream = 1;
// rev-list --left-right --count prints "<ahead>\t<behind>".
const lb = runGit(root, ["rev-list", "--left-right", "--count", "HEAD...@{upstream}"]);
const parts = lb.split(/[\t\s]+/).filter(Boolean);
if (parts.length >= 2) {
aheadCount = Number.parseInt(parts[0], 10) || 0;
behindCount = Number.parseInt(parts[1], 10) || 0;
}
} catch {
hasUpstream = 0;
aheadCount = 0;
behindCount = 0;
}
} catch (e) {
gitError = gitErrorMessage(e);
}
await db.insert(snapshots).values({
ts,
branch,
headShort,
porcelainLines,
hasUpstream,
aheadCount,
behindCount,
gitError,
});
return {
workspaceRoot: root,
branch,
headShort,
porcelainLines,
hasUpstream: hasUpstream === 1,
aheadCount,
behindCount,
// Signal reports undefined (field omitted) on success.
gitError: gitError || undefined,
};
}

View File

@ -0,0 +1,13 @@
import { integer, sqliteTable, text } from "drizzle-orm/sqlite-core";
/**
 * One git-workspace snapshot per sense run, keyed by capture time
 * (`ts`, ms since epoch). `hasUpstream` is a 0/1 INTEGER flag —
 * SQLite has no boolean type.
 */
export const snapshots = sqliteTable("snapshots", {
ts: integer("ts").primaryKey(),
branch: text("branch").notNull(),
headShort: text("head_short").notNull(),
porcelainLines: integer("porcelain_lines").notNull(),
hasUpstream: integer("has_upstream").notNull(),
aheadCount: integer("ahead_count").notNull(),
behindCount: integer("behind_count").notNull(),
/** Empty string when the snapshot succeeded; otherwise a short error summary. */
gitError: text("git_error").notNull(),
});

View File

@ -0,0 +1,374 @@
// src/index.ts
import { execFile } from "node:child_process";
// src/schema.ts
import { integer, real, sqliteTable, text } from "drizzle-orm/sqlite-core";
// Drizzle table for gateway health probes: one row per sense run.
// `alive`, `http_ok` are 0/1 INTEGER flags (SQLite has no boolean type).
var hermesGatewayHealth = sqliteTable("hermes_gateway_health", {
id: integer("id").primaryKey({ autoIncrement: true }),
ts: integer("ts").notNull(),
alive: integer("alive").notNull(),
mainPid: integer("main_pid").notNull(),
rssBytes: integer("rss_bytes").notNull(),
cpuPercent: real("cpu_percent").notNull(),
uptimeSec: integer("uptime_sec").notNull(),
activeSessions: integer("active_sessions").notNull(),
childProcessCount: integer("child_process_count").notNull(),
httpOk: integer("http_ok").notNull(),
httpStatusCode: integer("http_status_code").notNull(),
httpLatencyMs: integer("http_latency_ms").notNull(),
httpError: text("http_error").notNull()
});
// src/index.ts
// Subprocess deadline; kept under the sense's configured timeout.
var EXEC_TIMEOUT_MS = 25e3;
// HTTP probe deadline; always leaves 2s of slack before EXEC_TIMEOUT_MS.
var HTTP_TIMEOUT_MS = Math.min(23e3, EXEC_TIMEOUT_MS - 2e3);
// Hard cap on the stored http_error text.
var HTTP_ERROR_MAX_LEN = 256;
/**
 * URL for the gateway HTTP probe: HERMES_GATEWAY_HEALTH_URL wins,
 * then NERVE_HERMES_GATEWAY_URL; "" when neither is set.
 */
function gatewayProbeUrl() {
  const candidate =
    process.env.HERMES_GATEWAY_HEALTH_URL ??
    process.env.NERVE_HERMES_GATEWAY_URL ??
    "";
  return String(candidate).trim();
}
/**
 * Turn an unknown error into a short string for the http_error column:
 * prefer a truthy `code` property, fall back to `message`, then to the
 * value itself; never empty, never longer than HTTP_ERROR_MAX_LEN.
 */
function truncateHttpError(err) {
  let raw;
  if (err && typeof err === "object" && "code" in err && err.code) {
    raw = String(err.code);
  } else {
    raw = String(err?.message ?? err ?? "error");
  }
  const trimmed = raw.trim() || "error";
  if (trimmed.length > HTTP_ERROR_MAX_LEN) {
    return trimmed.slice(0, HTTP_ERROR_MAX_LEN);
  }
  return trimmed;
}
// GET the gateway URL once; success = HTTP status in 200-399 (redirects
// are followed first). Never rejects: timeouts/network failures land in
// the catch branch with httpOk=0 and a truncated error string.
async function probeGatewayHttp(url) {
if (!url) {
// No URL configured — report a sentinel instead of probing.
return {
httpOk: 0,
httpStatusCode: 0,
httpLatencyMs: 0,
httpError: "missing_url"
};
}
const t0 = Date.now();
try {
// AbortSignal.timeout turns a slow probe into a caught exception.
const signal = AbortSignal.timeout(HTTP_TIMEOUT_MS);
const res = await fetch(url, {
method: "GET",
signal,
redirect: "follow"
});
const httpLatencyMs = Date.now() - t0;
const code = res.status;
const ok = code >= 200 && code < 400;
return {
httpOk: ok ? 1 : 0,
httpStatusCode: code,
httpLatencyMs,
httpError: ok ? "" : truncateHttpError({ message: `HTTP ${code}` })
};
} catch (err) {
const httpLatencyMs = Date.now() - t0;
return {
httpOk: 0,
httpStatusCode: 0,
httpLatencyMs,
httpError: truncateHttpError(err)
};
}
}
/**
 * Convert a ps(1) `etime` value ("[[dd-]hh:]mm:ss", possibly shortened
 * to "mm:ss" or "ss") into whole seconds. Returns 0 for blank or
 * unparsable input.
 */
function etimeToSeconds(etime) {
  let rest = String(etime).trim();
  if (!rest) return 0;
  let days = 0;
  const dash = rest.indexOf("-");
  if (dash !== -1) {
    const parsedDays = Number.parseInt(rest.slice(0, dash), 10);
    if (Number.isFinite(parsedDays)) days = parsedDays;
    rest = rest.slice(dash + 1);
  }
  const fields = rest.split(":").map((part) => Number.parseInt(String(part).trim(), 10));
  if (fields.some((value) => !Number.isFinite(value))) return 0;
  let seconds;
  switch (fields.length) {
    case 3:
      seconds = fields[0] * 3600 + fields[1] * 60 + fields[2];
      break;
    case 2:
      seconds = fields[0] * 60 + fields[1];
      break;
    case 1:
      seconds = fields[0];
      break;
    default:
      return 0;
  }
  return Math.trunc(days * 86400 + seconds);
}
// Promisified execFile that never rejects: errors are folded into the
// result. `errCode` carries err.code (e.g. "ENOENT"); stdout/stderr are
// always strings.
// NOTE(review): async execFile reports a non-zero exit via `err.code`
// (a number), not `err.status` (that's execFileSync) — so `exitCode`
// here looks like it is always -1 on failure. No visible caller reads
// `exitCode`, but confirm against the Node child_process docs.
function execFileUtf8(file, args, opts = {}) {
return new Promise((resolve) => {
execFile(
file,
args,
{
encoding: "utf8",
maxBuffer: 8 * 1024 * 1024,
timeout: EXEC_TIMEOUT_MS,
...opts
},
(err, stdout, stderr) => {
const exitCode = err && typeof err.status === "number" ? err.status : err ? -1 : 0;
resolve({
exitCode,
errCode: err?.code,
stdout: String(stdout ?? ""),
stderr: String(stderr ?? "")
});
}
);
});
}
/** Extract "Main PID: <n>" from `systemctl status` output; 0 if absent. */
function parseMainPidFromStatus(statusText) {
  const match = statusText.match(/Main PID:\s*(\d+)/i);
  if (!match) return 0;
  const pid = Math.trunc(Number.parseInt(match[1], 10));
  return pid || 0;
}
/**
 * Read the "Active: <state> (<substate>)" line from `systemctl status`
 * output. Returns whether the unit state is "active" and whether the
 * substate mentions "running"; both false when no parsable line exists.
 */
function parseActiveLineFromStatus(statusText) {
  for (const line of statusText.split("\n")) {
    if (!/^\s*Active:/i.test(line)) continue;
    const match = line.match(/Active:\s*(\S+)\s*\(([^)]*)\)/i);
    if (match) {
      return {
        active: match[1].toLowerCase() === "active",
        subRunning: match[2].toLowerCase().includes("running"),
      };
    }
    // Malformed Active line: keep scanning remaining lines.
  }
  return { active: false, subRunning: false };
}
/**
 * Parse `systemctl show -p MainPID -p ActiveState -p SubState` output
 * (KEY=value lines) into { mainPid, active, subRunning }.
 */
function parseSystemctlShow(showText) {
  const result = { mainPid: 0, active: false, subRunning: false };
  for (const rawLine of showText.split("\n")) {
    const line = rawLine.trim();
    const eq = line.indexOf("=");
    if (eq === -1) continue;
    const key = line.slice(0, eq);
    const value = line.slice(eq + 1).trim();
    if (key === "MainPID") {
      result.mainPid = Math.trunc(Number.parseInt(value, 10)) || 0;
    } else if (key === "ActiveState") {
      result.active = value.toLowerCase() === "active";
    } else if (key === "SubState") {
      result.subRunning = value.toLowerCase() === "running";
    }
  }
  return result;
}
// Ask systemd (user scope) for the hermes-gateway unit's main PID and
// whether it is active+running. Human-readable `status` output is tried
// first; when it yields an incomplete picture, the machine-readable
// `show` output fills in the gaps.
async function readSystemdState() {
const status = await execFileUtf8("systemctl", [
"--user",
"--no-pager",
"status",
"hermes-gateway"
]);
const combined = `${status.stdout}
${status.stderr}`.trim();
let mainPid = parseMainPidFromStatus(combined);
let { active, subRunning } = parseActiveLineFromStatus(combined);
// Fall back to `systemctl show` if any field is still missing/false.
const needShow = mainPid <= 0 || !active || !subRunning;
if (needShow) {
const show = await execFileUtf8("systemctl", [
"--user",
"--no-pager",
"show",
"hermes-gateway",
"-p",
"MainPID",
"-p",
"ActiveState",
"-p",
"SubState"
]);
const showText = `${show.stdout}
${show.stderr}`;
const s = parseSystemctlShow(showText);
// Only overwrite fields the status output failed to establish.
if (mainPid <= 0 && s.mainPid > 0) mainPid = s.mainPid;
if (!active) active = s.active;
if (!subRunning) subRunning = s.subRunning;
}
return { mainPid, systemdActiveRunning: active && subRunning };
}
// True when `ps -p <pid>` lists the PID; false for pid<=0 or when ps
// itself is missing (ENOENT).
async function processExists(mainPid) {
if (mainPid <= 0) return false;
const r = await execFileUtf8("ps", ["-p", String(mainPid), "-o", "pid="]);
if (r.errCode === "ENOENT") return false;
return r.stdout.trim().length > 0;
}
// Read RSS (bytes), CPU percent, and uptime (seconds) for a PID via ps.
// Tries the `etimes` (plain seconds) field first and falls back to
// `etime` ([[dd-]hh:]mm:ss) for ps variants that lack etimes.
// Returns all-zero metrics on any failure.
async function readPsMetrics(mainPid) {
if (mainPid <= 0) {
return { rssBytes: 0, cpuPercent: 0, uptimeSec: 0 };
}
let r = await execFileUtf8("ps", [
"-p",
String(mainPid),
"-o",
"rss=,%cpu=,etimes="
]);
let line = r.stdout.trim().replace(/\s+/g, " ");
if (r.errCode === "ENOENT" || !line) {
return { rssBytes: 0, cpuPercent: 0, uptimeSec: 0 };
}
let parts = line.split(" ").filter(Boolean);
if (parts.length < 3) {
// etimes unsupported — retry with etime and parse the clock format.
r = await execFileUtf8("ps", [
"-p",
String(mainPid),
"-o",
"rss=,%cpu=,etime="
]);
line = r.stdout.trim().replace(/\s+/g, " ");
parts = line.split(" ").filter(Boolean);
if (parts.length < 3) {
return { rssBytes: 0, cpuPercent: 0, uptimeSec: 0 };
}
const rssKiB2 = Number(parts[0]);
const cpu2 = Number(parts[1]);
const uptimeSec2 = etimeToSeconds(parts.slice(2).join(" "));
// ps reports RSS in KiB; store bytes. CPU rounded to 2 decimals.
const rssBytes2 = Number.isFinite(rssKiB2) ? Math.trunc(rssKiB2 * 1024) : 0;
const cpuPercent2 = Number.isFinite(cpu2) ? Math.round(cpu2 * 100) / 100 : 0;
return { rssBytes: rssBytes2, cpuPercent: cpuPercent2, uptimeSec: uptimeSec2 };
}
const rssKiB = Number(parts[0]);
const cpu = Number(parts[1]);
const etimes = Number(parts[2]);
// ps reports RSS in KiB; store bytes. CPU rounded to 2 decimals.
const rssBytes = Number.isFinite(rssKiB) ? Math.trunc(rssKiB * 1024) : 0;
const cpuPercent = Number.isFinite(cpu) ? Math.round(cpu * 100) / 100 : 0;
const uptimeSec = Number.isFinite(etimes) ? Math.trunc(etimes) : 0;
return { rssBytes, cpuPercent, uptimeSec };
}
/**
 * Pull an active-session count out of `hermes sessions stats` output.
 * Matches, in priority order, an "Active sessions: <n>" line (any case,
 * via the `i` flag) and then "Total sessions: <n>". Returns 0 when
 * neither line is present.
 */
function parseActiveSessionsFromHermesStats(text2) {
  const src = String(text2);
  // The previous version also listed a lowercase "active sessions"
  // pattern, but it was dead code: the first regex is already
  // case-insensitive. The stateful `g` flags (and lastIndex resets)
  // were likewise unnecessary for a single exec per pattern.
  const patterns = [
    /^\s*Active\s+sessions?:\s*(\d+)/im,
    /^\s*Total\s+sessions?:\s*(\d+)/im,
  ];
  for (const re of patterns) {
    const m = re.exec(src);
    if (m) {
      const n = Math.trunc(Number.parseInt(m[1], 10));
      return Number.isFinite(n) ? n : 0;
    }
  }
  return 0;
}
// Run `hermes sessions stats` and parse the session count from its
// combined output. Returns 0 when the CLI is missing or anything fails.
async function readActiveSessions() {
try {
const r = await execFileUtf8("hermes", ["sessions", "stats"]);
if (r.errCode === "ENOENT") return 0;
return parseActiveSessionsFromHermesStats(`${r.stdout}
${r.stderr}`);
} catch {
return 0;
}
}
// Count direct child processes of `mainPid` via `ps --ppid`.
// NOTE(review): `--no-headers`/`--ppid` are GNU ps options — this
// presumably targets Linux only; confirm if BSD/macOS support matters.
// Returns 0 for pid<=0, missing ps, or any failure.
async function countDirectChildren(mainPid) {
if (mainPid <= 0) return 0;
try {
const r = await execFileUtf8("ps", [
"--no-headers",
"-o",
"pid",
"--ppid",
String(mainPid)
]);
if (r.errCode === "ENOENT") return 0;
const lines = r.stdout.split("\n").map((l) => l.trim()).filter(Boolean);
return lines.length;
} catch {
return 0;
}
}
// Sense entry point: probe the hermes-gateway unit (systemd state, ps
// metrics, session count, child count, HTTP reachability), persist one
// row, and return the same data as the signal. Every probe is wrapped
// so a single failure degrades to zero values instead of throwing.
async function compute(db, _peers) {
const ts = Date.now();
// --- systemd: main PID + active/running flags ---
let mainPid = 0;
let systemdActiveRunning = false;
try {
const st = await readSystemdState();
mainPid = st.mainPid;
systemdActiveRunning = st.systemdActiveRunning;
} catch {
mainPid = 0;
systemdActiveRunning = false;
}
// --- cross-check the PID actually exists ---
let psOk = false;
try {
psOk = await processExists(mainPid);
} catch {
psOk = false;
}
// --- per-process metrics, only when the process is visible ---
let rssBytes = 0;
let cpuPercent = 0;
let uptimeSec = 0;
if (psOk) {
try {
const m = await readPsMetrics(mainPid);
rssBytes = m.rssBytes;
cpuPercent = m.cpuPercent;
uptimeSec = m.uptimeSec;
} catch {
rssBytes = 0;
cpuPercent = 0;
uptimeSec = 0;
}
}
// alive = systemd says running AND the PID is real and visible.
const alive = systemdActiveRunning && mainPid > 0 && psOk ? 1 : 0;
// Session count is read regardless of aliveness.
let activeSessions = 0;
try {
activeSessions = await readActiveSessions();
} catch {
activeSessions = 0;
}
let childProcessCount = 0;
if (alive && mainPid > 0) {
try {
childProcessCount = await countDirectChildren(mainPid);
} catch {
childProcessCount = 0;
}
}
// --- HTTP reachability probe ---
let httpOk = 0;
let httpStatusCode = 0;
let httpLatencyMs = 0;
let httpError = "";
try {
const h = await probeGatewayHttp(gatewayProbeUrl());
httpOk = h.httpOk;
httpStatusCode = h.httpStatusCode;
httpLatencyMs = h.httpLatencyMs;
httpError = h.httpError;
} catch {
httpOk = 0;
httpStatusCode = 0;
httpLatencyMs = 0;
httpError = "probe_failed";
}
const storedMainPid = mainPid > 0 ? mainPid : 0;
// Process metrics are zeroed in storage when the gateway is not alive.
const row = {
ts,
alive,
mainPid: storedMainPid,
rssBytes: alive ? rssBytes : 0,
cpuPercent: alive ? cpuPercent : 0,
uptimeSec: alive ? uptimeSec : 0,
activeSessions,
childProcessCount: alive ? childProcessCount : 0,
httpOk,
httpStatusCode,
httpLatencyMs,
httpError
};
await db.insert(hermesGatewayHealth).values(row);
// Signal mirrors the stored row exactly.
return {
ts: row.ts,
alive: row.alive,
mainPid: row.mainPid,
rssBytes: row.rssBytes,
cpuPercent: row.cpuPercent,
uptimeSec: row.uptimeSec,
activeSessions: row.activeSessions,
childProcessCount: row.childProcessCount,
httpOk: row.httpOk,
httpStatusCode: row.httpStatusCode,
httpLatencyMs: row.httpLatencyMs,
httpError: row.httpError
};
}
export {
compute
};

View File

@ -0,0 +1,14 @@
-- Migration: 0001_init
-- Creates the hermes_gateway_health table for hermes-gateway-health sense.
-- One row per probe run; `alive` is a 0/1 flag (SQLite has no boolean).
-- HTTP probe columns are added later by 0002_add_http_probe.
CREATE TABLE IF NOT EXISTS hermes_gateway_health (
id INTEGER PRIMARY KEY AUTOINCREMENT,
ts INTEGER NOT NULL,
alive INTEGER NOT NULL,
main_pid INTEGER NOT NULL,
rss_bytes INTEGER NOT NULL,
cpu_percent REAL NOT NULL,
uptime_sec INTEGER NOT NULL,
active_sessions INTEGER NOT NULL,
child_process_count INTEGER NOT NULL
);

View File

@ -0,0 +1,7 @@
-- Migration: 0002_add_http_probe
-- HTTP reachability columns for hermes-gateway-health sense.
ALTER TABLE hermes_gateway_health ADD COLUMN http_ok INTEGER NOT NULL DEFAULT 0;
ALTER TABLE hermes_gateway_health ADD COLUMN http_status_code INTEGER NOT NULL DEFAULT 0;
ALTER TABLE hermes_gateway_health ADD COLUMN http_latency_ms INTEGER NOT NULL DEFAULT 0;
ALTER TABLE hermes_gateway_health ADD COLUMN http_error TEXT NOT NULL DEFAULT '';

View File

@ -0,0 +1,14 @@
{
"name": "sense-hermes-gateway-health",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild src/index.ts --bundle --platform=node --format=esm --outfile=index.js --packages=external"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -1,4 +1,7 @@
import { execFile } from "node:child_process";
import type { LibSQLDatabase } from "drizzle-orm/libsql";
import { hermesGatewayHealth } from "./schema.ts";
/** Keep subprocess deadlines slightly under typical sense timeout (30s). */
const EXEC_TIMEOUT_MS = 25_000;
@ -7,22 +10,6 @@ const HTTP_TIMEOUT_MS = Math.min(23_000, EXEC_TIMEOUT_MS - 2000);
const HTTP_ERROR_MAX_LEN = 256;
/** How many consecutive failures before triggering a restart. */
const FAILURE_THRESHOLD = 3;
type SenseState = {
consecutiveFailures: number;
lastRestartTs: number;
/** Minimum ms between restart attempts to avoid restart loops. */
restartCooldownMs: number;
};
export const initialState: SenseState = {
consecutiveFailures: 0,
lastRestartTs: 0,
restartCooldownMs: 300_000, // 5 minutes
};
function gatewayProbeUrl(): string {
const u =
process.env.HERMES_GATEWAY_HEALTH_URL ??
@ -40,13 +27,17 @@ function truncateHttpError(err: unknown): string {
return s.length > HTTP_ERROR_MAX_LEN ? s.slice(0, HTTP_ERROR_MAX_LEN) : s;
}
type HttpProbeResult = {
interface HttpProbeResult {
httpOk: number;
httpStatusCode: number;
httpLatencyMs: number;
httpError: string;
};
}
/**
 * GET the gateway URL; success = HTTP 200–399.
* URL must be set via HERMES_GATEWAY_HEALTH_URL or NERVE_HERMES_GATEWAY_URL.
*/
async function probeGatewayHttp(url: string): Promise<HttpProbeResult> {
if (!url) {
return {
@ -84,6 +75,10 @@ async function probeGatewayHttp(url: string): Promise<HttpProbeResult> {
}
}
/**
* When `ps` lacks `etimes` (wall-clock seconds since start), parse `etime`
* ([[dd-]hh:]mm:ss) into seconds. See ps(1) `etime` field description.
*/
function etimeToSeconds(etime: string): number {
let s = String(etime).trim();
if (!s) return 0;
@ -108,12 +103,12 @@ function etimeToSeconds(etime: string): number {
return 0;
}
type ExecResult = {
interface ExecResult {
exitCode: number;
errCode: string | undefined;
stdout: string;
stderr: string;
};
}
function execFileUtf8(file: string, args: string[], opts: Record<string, unknown> = {}): Promise<ExecResult> {
return new Promise((resolve) => {
@ -222,11 +217,11 @@ async function processExists(mainPid: number): Promise<boolean> {
return r.stdout.trim().length > 0;
}
type PsMetrics = {
interface PsMetrics {
rssBytes: number;
cpuPercent: number;
uptimeSec: number;
};
}
async function readPsMetrics(mainPid: number): Promise<PsMetrics> {
if (mainPid <= 0) {
@ -271,12 +266,61 @@ async function readPsMetrics(mainPid: number): Promise<PsMetrics> {
return { rssBytes, cpuPercent, uptimeSec };
}
export async function compute(prevState: SenseState) {
const now = Date.now();
/**
 * Pull an active-session count out of `hermes sessions stats` output.
 * Matches, in priority order, an "Active sessions: <n>" line (any case,
 * via the `i` flag) and then "Total sessions: <n>". Returns 0 when
 * neither line is present.
 */
function parseActiveSessionsFromHermesStats(text: string): number {
  const src = String(text);
  // The old lowercase "active sessions" pattern was dead code: the first
  // regex is already case-insensitive, so it could never be reached.
  // Dropping the `g` flags also removes the need for lastIndex resets.
  const patterns = [
    /^\s*Active\s+sessions?:\s*(\d+)/im,
    /^\s*Total\s+sessions?:\s*(\d+)/im,
  ];
  for (const re of patterns) {
    const m = re.exec(src);
    if (m) {
      const n = Math.trunc(Number.parseInt(m[1], 10));
      return Number.isFinite(n) ? n : 0;
    }
  }
  return 0;
}
/**
 * Run `hermes sessions stats` and parse the session count from its
 * combined stdout/stderr. Returns 0 when the CLI is missing (ENOENT)
 * or anything else fails.
 */
async function readActiveSessions(): Promise<number> {
try {
const r = await execFileUtf8("hermes", ["sessions", "stats"]);
if (r.errCode === "ENOENT") return 0;
return parseActiveSessionsFromHermesStats(`${r.stdout}\n${r.stderr}`);
} catch {
return 0;
}
}
/**
 * Count direct child processes of `mainPid` via `ps --ppid`.
 * Returns 0 for pid<=0, missing ps, or any failure.
 * NOTE(review): `--no-headers`/`--ppid` are GNU ps options — this
 * presumably targets Linux only; confirm if BSD/macOS support matters.
 */
async function countDirectChildren(mainPid: number): Promise<number> {
if (mainPid <= 0) return 0;
try {
const r = await execFileUtf8("ps", [
"--no-headers",
"-o",
"pid",
"--ppid",
String(mainPid),
]);
if (r.errCode === "ENOENT") return 0;
const lines = r.stdout
.split("\n")
.map((l) => l.trim())
.filter(Boolean);
return lines.length;
} catch {
return 0;
}
}
export async function compute(db: LibSQLDatabase, _peers: unknown) {
const ts = Date.now();
// --- probe gateway ---
let mainPid = 0;
let systemdActiveRunning = false;
try {
const st = await readSystemdState();
mainPid = st.mainPid;
@ -311,6 +355,22 @@ export async function compute(prevState: SenseState) {
const alive = systemdActiveRunning && mainPid > 0 && psOk ? 1 : 0;
let activeSessions = 0;
try {
activeSessions = await readActiveSessions();
} catch {
activeSessions = 0;
}
let childProcessCount = 0;
if (alive && mainPid > 0) {
try {
childProcessCount = await countDirectChildren(mainPid);
} catch {
childProcessCount = 0;
}
}
let httpOk = 0;
let httpStatusCode = 0;
let httpLatencyMs = 0;
@ -328,42 +388,37 @@ export async function compute(prevState: SenseState) {
httpError = "probe_failed";
}
// --- decide health ---
const healthy = alive === 1 && httpOk === 1;
const storedMainPid = mainPid > 0 ? mainPid : 0;
// --- state machine: track consecutive failures ---
const consecutiveFailures = healthy ? 0 : prevState.consecutiveFailures + 1;
const lastRestartTs = prevState.lastRestartTs;
const cooldown = prevState.restartCooldownMs;
const cooldownElapsed = now - lastRestartTs >= cooldown;
// --- trigger restart? ---
const shouldRestart =
consecutiveFailures >= FAILURE_THRESHOLD && cooldownElapsed;
const nextState: SenseState = {
consecutiveFailures,
lastRestartTs: shouldRestart ? now : lastRestartTs,
restartCooldownMs: cooldown,
};
const signal = {
ts: now,
const row = {
ts,
alive,
mainPid: mainPid > 0 ? mainPid : 0,
mainPid: storedMainPid,
rssBytes: alive ? rssBytes : 0,
cpuPercent: alive ? cpuPercent : 0,
uptimeSec: alive ? uptimeSec : 0,
activeSessions,
childProcessCount: alive ? childProcessCount : 0,
httpOk,
httpStatusCode,
httpLatencyMs,
httpError,
consecutiveFailures,
};
const trigger = shouldRestart
? { command: "systemctl --user restart hermes-gateway" }
: null;
await db.insert(hermesGatewayHealth).values(row);
return { state: nextState, signal, trigger };
return {
ts: row.ts,
alive: row.alive,
mainPid: row.mainPid,
rssBytes: row.rssBytes,
cpuPercent: row.cpuPercent,
uptimeSec: row.uptimeSec,
activeSessions: row.activeSessions,
childProcessCount: row.childProcessCount,
httpOk: row.httpOk,
httpStatusCode: row.httpStatusCode,
httpLatencyMs: row.httpLatencyMs,
httpError: row.httpError,
};
}

View File

@ -0,0 +1,17 @@
import { integer, real, sqliteTable, text } from "drizzle-orm/sqlite-core";
/**
 * One gateway health probe per sense run. `alive` and `httpOk` are
 * 0/1 INTEGER flags — SQLite has no boolean type.
 */
export const hermesGatewayHealth = sqliteTable("hermes_gateway_health", {
id: integer("id").primaryKey({ autoIncrement: true }),
ts: integer("ts").notNull(),
alive: integer("alive").notNull(),
mainPid: integer("main_pid").notNull(),
rssBytes: integer("rss_bytes").notNull(),
cpuPercent: real("cpu_percent").notNull(),
uptimeSec: integer("uptime_sec").notNull(),
activeSessions: integer("active_sessions").notNull(),
childProcessCount: integer("child_process_count").notNull(),
httpOk: integer("http_ok").notNull(),
httpStatusCode: integer("http_status_code").notNull(),
httpLatencyMs: integer("http_latency_ms").notNull(),
httpError: text("http_error").notNull(),
});

View File

@ -0,0 +1,118 @@
// src/index.ts
import { createReadStream } from "node:fs";
import { readdir } from "node:fs/promises";
import { homedir } from "node:os";
import { join } from "node:path";
import { createInterface } from "node:readline";
// src/schema.ts
import { integer, sqliteTable } from "drizzle-orm/sqlite-core";
// Bundled copy of src/schema.ts — regenerated by `pnpm build`, do not hand-edit.
var hermesSessionMessageStats = sqliteTable("hermes_session_message_stats", {
  id: integer("id").primaryKey({ autoIncrement: true }),
  ts: integer("ts").notNull(),
  totalUserMessages: integer("total_user_messages").notNull(),
  totalAssistantMessages: integer("total_assistant_messages").notNull(),
  totalToolMessages: integer("total_tool_messages").notNull(),
  totalMessages: integer("total_messages").notNull(),
  activeSessions: integer("active_sessions").notNull(),
  measurementWindowSeconds: integer("measurement_window_seconds").notNull()
});
// src/index.ts
var MEASUREMENT_WINDOW_MS = 9e5;
var MEASUREMENT_WINDOW_SECONDS = 900;
// Count user/assistant/tool messages in one session .jsonl file.
// Streams line-by-line (memory-safe for large session logs). Malformed
// lines, unknown roles, and records outside [cutoffMs, nowMs] are skipped.
async function aggregateJsonlFile(filePath, cutoffMs, nowMs) {
  let user = 0;
  let assistant = 0;
  let tool = 0;
  let fileHadActivity = false;
  const input = createReadStream(filePath, { encoding: "utf8" });
  const rl = createInterface({ input, crlfDelay: Infinity });
  try {
    for await (const line of rl) {
      const trimmed = line.trim();
      if (!trimmed) continue;
      let obj;
      try {
        obj = JSON.parse(trimmed);
      } catch {
        continue; // malformed JSON line — ignore
      }
      if (typeof obj !== "object" || obj === null || typeof obj.role !== "string" || typeof obj.timestamp !== "string") {
        continue;
      }
      const record = obj;
      const t = Date.parse(record.timestamp);
      // Only count records inside the measurement window.
      if (!Number.isFinite(t) || t < cutoffMs || t > nowMs) continue;
      const roleNorm = record.role.trim().toLowerCase();
      if (roleNorm === "user") {
        user++;
        fileHadActivity = true;
      } else if (roleNorm === "assistant") {
        assistant++;
        fileHadActivity = true;
      } else if (roleNorm === "tool") {
        tool++;
        fileHadActivity = true;
      }
    }
  } finally {
    rl.close();
    // Fix: rl.close() does not tear down the underlying fd when iteration
    // exits early (throw/abort); destroy the stream so no file handle leaks.
    input.destroy();
  }
  return { user, assistant, tool, fileHadActivity };
}
// Aggregate per-role message counts across all Hermes session .jsonl files
// for the trailing 15-minute window (MEASUREMENT_WINDOW_MS), persist one
// stats row, and return it as the sense signal.
async function compute(db, _peers) {
  const nowMs = Date.now();
  const cutoffMs = nowMs - MEASUREMENT_WINDOW_MS;
  const ts = nowMs;
  let totalUserMessages = 0;
  let totalAssistantMessages = 0;
  let totalToolMessages = 0;
  let activeSessions = 0;
  const sessionsDir = join(homedir(), ".hermes", "sessions");
  let files = [];
  try {
    const entries = await readdir(sessionsDir, { withFileTypes: true });
    files = entries.filter((e) => e.isFile() && e.name.endsWith(".jsonl")).map((e) => join(sessionsDir, e.name));
  } catch (err) {
    // A missing sessions directory simply means "no sessions yet".
    if (err && typeof err === "object" && "code" in err && err.code === "ENOENT") {
      files = [];
    } else {
      throw err;
    }
  }
  // Files are processed sequentially; a session counts as active when it
  // contributed at least one message inside the window.
  for (const filePath of files) {
    const { user, assistant, tool, fileHadActivity } = await aggregateJsonlFile(
      filePath,
      cutoffMs,
      nowMs
    );
    totalUserMessages += user;
    totalAssistantMessages += assistant;
    totalToolMessages += tool;
    if (fileHadActivity) activeSessions++;
  }
  const totalMessages = totalUserMessages + totalAssistantMessages + totalToolMessages;
  const row = {
    ts,
    totalUserMessages,
    totalAssistantMessages,
    totalToolMessages,
    totalMessages,
    activeSessions,
    measurementWindowSeconds: MEASUREMENT_WINDOW_SECONDS
  };
  await db.insert(hermesSessionMessageStats).values(row);
  // Return a copy of the persisted row as the signal payload.
  return {
    ts: row.ts,
    totalUserMessages: row.totalUserMessages,
    totalAssistantMessages: row.totalAssistantMessages,
    totalToolMessages: row.totalToolMessages,
    totalMessages: row.totalMessages,
    activeSessions: row.activeSessions,
    measurementWindowSeconds: row.measurementWindowSeconds
  };
}
export {
compute
};

View File

@ -0,0 +1,13 @@
-- Migration: 0001_init
-- Creates the hermes_session_message_stats table for hermes-session-message-stats sense.
-- One row per aggregation snapshot; counters cover the trailing measurement window.
CREATE TABLE IF NOT EXISTS hermes_session_message_stats (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  ts INTEGER NOT NULL,                         -- snapshot time, epoch ms
  total_user_messages INTEGER NOT NULL,
  total_assistant_messages INTEGER NOT NULL,
  total_tool_messages INTEGER NOT NULL,
  total_messages INTEGER NOT NULL,             -- sum of the three role counters
  active_sessions INTEGER NOT NULL,            -- session files with activity in window
  measurement_window_seconds INTEGER NOT NULL  -- aggregation window length (900)
);

View File

@ -0,0 +1,14 @@
{
"name": "sense-hermes-session-message-stats",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild src/index.ts --bundle --platform=node --format=esm --outfile=index.js --packages=external"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -0,0 +1,128 @@
import { createReadStream } from "node:fs";
import { readdir } from "node:fs/promises";
import { homedir } from "node:os";
import { join } from "node:path";
import { createInterface } from "node:readline";
import type { LibSQLDatabase } from "drizzle-orm/libsql";
import { hermesSessionMessageStats } from "./schema.ts";
const MEASUREMENT_WINDOW_MS = 900_000;
const MEASUREMENT_WINDOW_SECONDS = 900;
// Per-file aggregation result for aggregateJsonlFile.
// Converted from `interface` to `type`: the project's reviewer checklist
// treats `interface` as a convention violation (prefer `type` aliases).
type MessageCounts = {
  user: number;             // messages with role "user" inside the window
  assistant: number;        // messages with role "assistant" inside the window
  tool: number;             // messages with role "tool" inside the window
  fileHadActivity: boolean; // true if any message was counted for this file
};
/**
 * Count user/assistant/tool messages in one session .jsonl file.
 * Streams line-by-line so large session logs are never fully in memory.
 * Malformed lines, unknown roles, and records outside [cutoffMs, nowMs]
 * are skipped silently.
 */
async function aggregateJsonlFile(filePath: string, cutoffMs: number, nowMs: number): Promise<MessageCounts> {
  let user = 0;
  let assistant = 0;
  let tool = 0;
  let fileHadActivity = false;
  const input = createReadStream(filePath, { encoding: "utf8" });
  const rl = createInterface({ input, crlfDelay: Infinity });
  try {
    for await (const line of rl) {
      const trimmed = line.trim();
      if (!trimmed) continue;
      let obj: unknown;
      try {
        obj = JSON.parse(trimmed);
      } catch {
        continue; // malformed JSON line — ignore
      }
      if (
        typeof obj !== "object" || obj === null ||
        typeof (obj as Record<string, unknown>).role !== "string" ||
        typeof (obj as Record<string, unknown>).timestamp !== "string"
      ) {
        continue;
      }
      const record = obj as { role: string; timestamp: string };
      const t = Date.parse(record.timestamp);
      // Only count records inside the measurement window.
      if (!Number.isFinite(t) || t < cutoffMs || t > nowMs) continue;
      const roleNorm = record.role.trim().toLowerCase();
      if (roleNorm === "user") {
        user++;
        fileHadActivity = true;
      } else if (roleNorm === "assistant") {
        assistant++;
        fileHadActivity = true;
      } else if (roleNorm === "tool") {
        tool++;
        fileHadActivity = true;
      }
    }
  } finally {
    rl.close();
    // Fix: rl.close() does not tear down the underlying fd when iteration
    // exits early (throw/abort); destroy the stream so no file handle leaks.
    input.destroy();
  }
  return { user, assistant, tool, fileHadActivity };
}
/**
 * Aggregate message counts across all Hermes session logs for the trailing
 * 15-minute window, persist one snapshot row, and return it as the signal.
 * A missing sessions directory is treated as "no sessions yet".
 */
export async function compute(db: LibSQLDatabase, _peers: unknown) {
  const nowMs = Date.now();
  const cutoffMs = nowMs - MEASUREMENT_WINDOW_MS;

  // Collect *.jsonl session files; tolerate only ENOENT on the directory.
  const sessionsDir = join(homedir(), ".hermes", "sessions");
  const sessionFiles: string[] = [];
  try {
    const entries = await readdir(sessionsDir, { withFileTypes: true });
    for (const entry of entries) {
      if (entry.isFile() && entry.name.endsWith(".jsonl")) {
        sessionFiles.push(join(sessionsDir, entry.name));
      }
    }
  } catch (err) {
    const missingDir =
      err && typeof err === "object" && "code" in err &&
      (err as NodeJS.ErrnoException).code === "ENOENT";
    if (!missingDir) throw err;
  }

  // Sequentially aggregate each file; a session is "active" when it
  // contributed at least one message inside the window.
  const totals = { user: 0, assistant: 0, tool: 0 };
  let activeSessions = 0;
  for (const sessionFile of sessionFiles) {
    const counts = await aggregateJsonlFile(sessionFile, cutoffMs, nowMs);
    totals.user += counts.user;
    totals.assistant += counts.assistant;
    totals.tool += counts.tool;
    if (counts.fileHadActivity) activeSessions++;
  }

  const row = {
    ts: nowMs,
    totalUserMessages: totals.user,
    totalAssistantMessages: totals.assistant,
    totalToolMessages: totals.tool,
    totalMessages: totals.user + totals.assistant + totals.tool,
    activeSessions,
    measurementWindowSeconds: MEASUREMENT_WINDOW_SECONDS,
  };
  await db.insert(hermesSessionMessageStats).values(row);
  // The signal payload is a copy of the persisted row.
  return { ...row };
}

View File

@ -0,0 +1,12 @@
import { integer, sqliteTable } from "drizzle-orm/sqlite-core";
// One row per aggregation snapshot of Hermes session messages.
export const hermesSessionMessageStats = sqliteTable("hermes_session_message_stats", {
  id: integer("id").primaryKey({ autoIncrement: true }),
  ts: integer("ts").notNull(), // snapshot time, epoch ms
  totalUserMessages: integer("total_user_messages").notNull(),
  totalAssistantMessages: integer("total_assistant_messages").notNull(),
  totalToolMessages: integer("total_tool_messages").notNull(),
  totalMessages: integer("total_messages").notNull(), // sum of the three role counters
  activeSessions: integer("active_sessions").notNull(), // files with activity in window
  measurementWindowSeconds: integer("measurement_window_seconds").notNull(), // 900
});

View File

@ -0,0 +1,112 @@
// src/index.ts
import { loadavg, totalmem, freemem, uptime } from "node:os";
import { execSync } from "node:child_process";
import { readFile } from "node:fs/promises";
// src/schema.ts
import { integer, real, sqliteTable } from "drizzle-orm/sqlite-core";
// Bundled copy of src/schema.ts — regenerated by `pnpm build`, do not hand-edit.
var snapshots = sqliteTable("snapshots", {
  ts: integer("ts").primaryKey(),
  cpuLoad1m: real("cpu_load_1m").notNull(),
  cpuLoad5m: real("cpu_load_5m").notNull(),
  cpuLoad15m: real("cpu_load_15m").notNull(),
  memTotalMB: integer("mem_total_mb").notNull(),
  memUsedMB: integer("mem_used_mb").notNull(),
  memUsedPct: real("mem_used_pct").notNull(),
  diskTotalGB: real("disk_total_gb").notNull(),
  diskUsedGB: real("disk_used_gb").notNull(),
  diskUsedPct: real("disk_used_pct").notNull(),
  uptimeSec: integer("uptime_sec").notNull(),
  // TCP socket stats (merged from linux-tcp-socket-stats); nullable because
  // rows written before migration 0002 carry no values.
  socketsUsed: integer("sockets_used"),
  tcpInuse: integer("tcp_inuse"),
  tcpOrphan: integer("tcp_orphan"),
  tcpTw: integer("tcp_tw"),
  tcpAlloc: integer("tcp_alloc"),
  tcpMemPages: integer("tcp_mem_pages")
});
// src/index.ts
var SOCKSTAT_PATH = "/proc/net/sockstat";
// Parse /proc/net/sockstat text into socket/TCP counters.
// Lines not starting with "sockets:" or "TCP:" are ignored; any missing
// or unparsable field stays 0.
function parseSockstat(content) {
  const counters = {
    socketsUsed: 0,
    tcpInuse: 0,
    tcpOrphan: 0,
    tcpTw: 0,
    tcpAlloc: 0,
    tcpMemPages: 0
  };
  for (const rawLine of content.split("\n")) {
    const line = rawLine.trim();
    if (line.startsWith("sockets:")) {
      const fields = line.split(/\s+/);
      const usedPos = fields.indexOf("used");
      if (usedPos !== -1 && usedPos + 1 < fields.length) {
        counters.socketsUsed = Number.parseInt(fields[usedPos + 1], 10) || 0;
      }
    } else if (line.startsWith("TCP:")) {
      // "TCP: key1 v1 key2 v2 ..." — fold the pairs into a lookup table.
      const fields = line.split(/\s+/);
      const kv = {};
      for (let i = 1; i + 1 < fields.length; i += 2) {
        kv[fields[i]] = Number.parseInt(fields[i + 1], 10) || 0;
      }
      counters.tcpInuse = kv.inuse ?? 0;
      counters.tcpOrphan = kv.orphan ?? 0;
      counters.tcpTw = kv.tw ?? 0;
      counters.tcpAlloc = kv.alloc ?? 0;
      counters.tcpMemPages = kv.mem ?? 0;
    }
  }
  return counters;
}
// Snapshot host health (CPU load averages, memory, root-disk usage via `df`,
// TCP socket stats from /proc/net/sockstat, uptime), persist one row, and
// return the structured signal. Disk and TCP readings degrade to zeros when
// their data source is unavailable.
async function compute(db, _peers) {
  const [load1, load5, load15] = loadavg();
  const memTotal = totalmem();
  const memFree = freemem();
  const memUsed = memTotal - memFree;
  const memTotalMB = Math.round(memTotal / 1024 / 1024);
  const memUsedMB = Math.round(memUsed / 1024 / 1024);
  const memUsedPct = Math.round(memUsed / memTotal * 1e4) / 100; // 2-decimal percent
  let diskTotalGB = 0, diskUsedGB = 0, diskUsedPct = 0;
  try {
    const df = execSync("df -B1 / | tail -1", { encoding: "utf-8" }).trim();
    const parts = df.split(/\s+/);
    const total = Number(parts[1]);
    const used = Number(parts[2]);
    diskTotalGB = Math.round(total / 1024 / 1024 / 1024 * 100) / 100;
    diskUsedGB = Math.round(used / 1024 / 1024 / 1024 * 100) / 100;
    diskUsedPct = total > 0 ? Math.round(used / total * 1e4) / 100 : 0;
  } catch {
    // df unavailable — keep disk metrics at 0 (best-effort).
  }
  let tcp = { socketsUsed: 0, tcpInuse: 0, tcpOrphan: 0, tcpTw: 0, tcpAlloc: 0, tcpMemPages: 0 };
  try {
    const content = await readFile(SOCKSTAT_PATH, "utf8");
    tcp = parseSockstat(content);
  } catch {
    // /proc/net/sockstat unreadable — keep zeroed counters.
  }
  const ts = Date.now();
  const uptimeSec = Math.round(uptime());
  await db.insert(snapshots).values({
    ts,
    cpuLoad1m: load1,
    cpuLoad5m: load5,
    cpuLoad15m: load15,
    memTotalMB,
    memUsedMB,
    memUsedPct,
    diskTotalGB,
    diskUsedGB,
    diskUsedPct,
    uptimeSec,
    socketsUsed: tcp.socketsUsed,
    tcpInuse: tcp.tcpInuse,
    tcpOrphan: tcp.tcpOrphan,
    tcpTw: tcp.tcpTw,
    tcpAlloc: tcp.tcpAlloc,
    tcpMemPages: tcp.tcpMemPages
  });
  return {
    cpu: { load1m: load1, load5m: load5, load15m: load15 },
    memory: { totalMB: memTotalMB, usedMB: memUsedMB, usedPct: memUsedPct },
    disk: { totalGB: diskTotalGB, usedGB: diskUsedGB, usedPct: diskUsedPct },
    tcp: { socketsUsed: tcp.socketsUsed, inuse: tcp.tcpInuse, orphan: tcp.tcpOrphan, tw: tcp.tcpTw, alloc: tcp.tcpAlloc, memPages: tcp.tcpMemPages },
    uptimeSec
  };
}
export {
compute
};

View File

@ -0,0 +1,16 @@
-- Migration: 0001_init
-- Creates the snapshots table for linux-system-health sense.
-- One row per probe; ts (epoch ms, from Date.now()) is the primary key.
CREATE TABLE IF NOT EXISTS snapshots (
  ts INTEGER PRIMARY KEY,
  cpu_load_1m REAL NOT NULL,
  cpu_load_5m REAL NOT NULL,
  cpu_load_15m REAL NOT NULL,
  mem_total_mb INTEGER NOT NULL,
  mem_used_mb INTEGER NOT NULL,
  mem_used_pct REAL NOT NULL,
  disk_total_gb REAL NOT NULL,
  disk_used_gb REAL NOT NULL,
  disk_used_pct REAL NOT NULL,
  uptime_sec INTEGER NOT NULL
);

View File

@ -0,0 +1,6 @@
-- Migration: 0002
-- Adds TCP socket-stat columns (merged from the linux-tcp-socket-stats sense).
-- Columns are nullable so rows written before this migration stay valid.
ALTER TABLE snapshots ADD COLUMN sockets_used INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_inuse INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_orphan INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_tw INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_alloc INTEGER;
ALTER TABLE snapshots ADD COLUMN tcp_mem_pages INTEGER;

View File

@ -0,0 +1,14 @@
{
"name": "sense-linux-system-health",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild src/index.ts --bundle --platform=node --format=esm --outfile=index.js --packages=external"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -0,0 +1,96 @@
import { loadavg, totalmem, freemem, uptime } from "node:os";
import { execSync } from "node:child_process";
import { readFile } from "node:fs/promises";
import type { LibSQLDatabase } from "drizzle-orm/libsql";
import { snapshots } from "./schema.ts";
const SOCKSTAT_PATH = "/proc/net/sockstat";
// Parsed counters from /proc/net/sockstat.
// Converted from `interface` to `type`: the project's reviewer checklist
// treats `interface` as a convention violation (prefer `type` aliases).
type SockstatResult = {
  socketsUsed: number; // "sockets: used N"
  tcpInuse: number;    // "TCP:" line, "inuse" field
  tcpOrphan: number;   // "TCP:" line, "orphan" field
  tcpTw: number;       // "TCP:" line, "tw" field
  tcpAlloc: number;    // "TCP:" line, "alloc" field
  tcpMemPages: number; // "TCP:" line, "mem" field
};
/**
 * Parse /proc/net/sockstat text into socket/TCP counters.
 * Lines not starting with "sockets:" or "TCP:" are ignored; any missing
 * or unparsable field stays 0.
 */
function parseSockstat(content: string): SockstatResult {
  const counters: SockstatResult = {
    socketsUsed: 0,
    tcpInuse: 0,
    tcpOrphan: 0,
    tcpTw: 0,
    tcpAlloc: 0,
    tcpMemPages: 0,
  };
  for (const rawLine of content.split("\n")) {
    const line = rawLine.trim();
    if (line.startsWith("sockets:")) {
      const fields = line.split(/\s+/);
      const usedPos = fields.indexOf("used");
      if (usedPos !== -1 && usedPos + 1 < fields.length) {
        counters.socketsUsed = Number.parseInt(fields[usedPos + 1], 10) || 0;
      }
    } else if (line.startsWith("TCP:")) {
      // "TCP: key1 v1 key2 v2 ..." — fold the pairs into a lookup table.
      const fields = line.split(/\s+/);
      const kv: Record<string, number> = {};
      for (let i = 1; i + 1 < fields.length; i += 2) {
        kv[fields[i]] = Number.parseInt(fields[i + 1], 10) || 0;
      }
      counters.tcpInuse = kv.inuse ?? 0;
      counters.tcpOrphan = kv.orphan ?? 0;
      counters.tcpTw = kv.tw ?? 0;
      counters.tcpAlloc = kv.alloc ?? 0;
      counters.tcpMemPages = kv.mem ?? 0;
    }
  }
  return counters;
}
/**
 * Snapshot host health (load averages, memory, root-disk usage, TCP socket
 * stats, uptime), persist one row, and return the structured signal.
 * Disk and TCP readings degrade to zeros when their data source fails.
 */
export async function compute(db: LibSQLDatabase, _peers: unknown) {
  const [load1m, load5m, load15m] = loadavg();

  // Memory: MB rounded, percentage to two decimals.
  const totalBytes = totalmem();
  const usedBytes = totalBytes - freemem();
  const memTotalMB = Math.round(totalBytes / 1024 / 1024);
  const memUsedMB = Math.round(usedBytes / 1024 / 1024);
  const memUsedPct = Math.round((usedBytes / totalBytes) * 10000) / 100;

  // Root filesystem usage via df; left at 0 when the command fails.
  let diskTotalGB = 0;
  let diskUsedGB = 0;
  let diskUsedPct = 0;
  try {
    const dfLine = execSync("df -B1 / | tail -1", { encoding: "utf-8" }).trim();
    const cols = dfLine.split(/\s+/);
    const totalDisk = Number(cols[1]);
    const usedDisk = Number(cols[2]);
    diskTotalGB = Math.round(totalDisk / 1024 / 1024 / 1024 * 100) / 100;
    diskUsedGB = Math.round(usedDisk / 1024 / 1024 / 1024 * 100) / 100;
    diskUsedPct = totalDisk > 0 ? Math.round((usedDisk / totalDisk) * 10000) / 100 : 0;
  } catch {}

  // TCP socket stats; zeroed when /proc/net/sockstat is unreadable.
  let tcp: SockstatResult = { socketsUsed: 0, tcpInuse: 0, tcpOrphan: 0, tcpTw: 0, tcpAlloc: 0, tcpMemPages: 0 };
  try {
    tcp = parseSockstat(await readFile(SOCKSTAT_PATH, "utf8"));
  } catch {}

  const ts = Date.now();
  const uptimeSec = Math.round(uptime());
  await db.insert(snapshots).values({
    ts,
    cpuLoad1m: load1m,
    cpuLoad5m: load5m,
    cpuLoad15m: load15m,
    memTotalMB,
    memUsedMB,
    memUsedPct,
    diskTotalGB,
    diskUsedGB,
    diskUsedPct,
    uptimeSec,
    socketsUsed: tcp.socketsUsed,
    tcpInuse: tcp.tcpInuse,
    tcpOrphan: tcp.tcpOrphan,
    tcpTw: tcp.tcpTw,
    tcpAlloc: tcp.tcpAlloc,
    tcpMemPages: tcp.tcpMemPages,
  });
  return {
    cpu: { load1m, load5m, load15m },
    memory: { totalMB: memTotalMB, usedMB: memUsedMB, usedPct: memUsedPct },
    disk: { totalGB: diskTotalGB, usedGB: diskUsedGB, usedPct: diskUsedPct },
    tcp: { socketsUsed: tcp.socketsUsed, inuse: tcp.tcpInuse, orphan: tcp.tcpOrphan, tw: tcp.tcpTw, alloc: tcp.tcpAlloc, memPages: tcp.tcpMemPages },
    uptimeSec,
  };
}

View File

@ -0,0 +1,22 @@
import { integer, real, sqliteTable, text } from "drizzle-orm/sqlite-core";
// Host-health snapshot row; `ts` (epoch ms) is the primary key.
export const snapshots = sqliteTable("snapshots", {
  ts: integer("ts").primaryKey(),
  cpuLoad1m: real("cpu_load_1m").notNull(),
  cpuLoad5m: real("cpu_load_5m").notNull(),
  cpuLoad15m: real("cpu_load_15m").notNull(),
  memTotalMB: integer("mem_total_mb").notNull(),
  memUsedMB: integer("mem_used_mb").notNull(),
  memUsedPct: real("mem_used_pct").notNull(),
  diskTotalGB: real("disk_total_gb").notNull(),
  diskUsedGB: real("disk_used_gb").notNull(),
  diskUsedPct: real("disk_used_pct").notNull(),
  uptimeSec: integer("uptime_sec").notNull(),
  // TCP socket stats (merged from linux-tcp-socket-stats); nullable because
  // rows written before migration 0002 carry no values.
  socketsUsed: integer("sockets_used"),
  tcpInuse: integer("tcp_inuse"),
  tcpOrphan: integer("tcp_orphan"),
  tcpTw: integer("tcp_tw"),
  tcpAlloc: integer("tcp_alloc"),
  tcpMemPages: integer("tcp_mem_pages"),
});

View File

@ -0,0 +1,44 @@
// src/schema.ts
import { integer, real, sqliteTable } from "drizzle-orm/sqlite-core";
// Bundled copy of src/schema.ts — regenerated by `pnpm build`, do not hand-edit.
var workerProcessMetrics = sqliteTable("worker_process_metrics", {
  ts: integer("ts").primaryKey(),
  pid: integer("pid").notNull(),
  uptimeSec: real("uptime_sec").notNull(),
  heapUsedMB: real("heap_used_mb").notNull(),
  rssMB: real("rss_mb").notNull(),
  externalMB: real("external_mb").notNull()
});
// src/index.ts
// Round a number to two decimal places.
function round2(n) {
  const scaled = Math.round(n * 100);
  return scaled / 100;
}
// Snapshot this worker process's own metrics (pid, uptime, heap/RSS/external
// memory in MB, rounded to 2 decimals), persist one row, and return it as
// the sense signal.
async function compute(db, _peers) {
  const ts = Date.now();
  const pid = process.pid;
  const uptimeSec = process.uptime();
  const m = process.memoryUsage();
  const heapUsedMB = round2(m.heapUsed / 1024 / 1024);
  const rssMB = round2(m.rss / 1024 / 1024);
  const externalMB = round2(m.external / 1024 / 1024);
  const row = {
    ts,
    pid,
    uptimeSec,
    heapUsedMB,
    rssMB,
    externalMB
  };
  await db.insert(workerProcessMetrics).values(row);
  // Return a copy of the persisted row as the signal payload.
  return {
    ts: row.ts,
    pid: row.pid,
    uptimeSec: row.uptimeSec,
    heapUsedMB: row.heapUsedMB,
    rssMB: row.rssMB,
    externalMB: row.externalMB
  };
}
export {
compute
};

View File

@ -0,0 +1,11 @@
-- Migration: 0001_init
-- Creates the worker_process_metrics table for worker-process-metrics sense.
-- One row per probe of the worker's own process; ts (epoch ms) is the key.
CREATE TABLE IF NOT EXISTS worker_process_metrics (
  ts INTEGER PRIMARY KEY,
  pid INTEGER NOT NULL,
  uptime_sec REAL NOT NULL,
  heap_used_mb REAL NOT NULL,
  rss_mb REAL NOT NULL,
  external_mb REAL NOT NULL
);

View File

@ -0,0 +1,14 @@
{
"name": "sense-worker-process-metrics",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild src/index.ts --bundle --platform=node --format=esm --outfile=index.js --packages=external"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -0,0 +1,36 @@
import type { LibSQLDatabase } from "drizzle-orm/libsql";
import { workerProcessMetrics } from "./schema.ts";
/** Round a number to two decimal places. */
function round2(n: number): number {
  const scaled = Math.round(n * 100);
  return scaled / 100;
}
/**
 * Snapshot this worker process's own metrics (pid, uptime, heap/RSS/external
 * memory in MB, rounded to 2 decimals), persist one row, and return it as
 * the sense signal.
 */
export async function compute(db: LibSQLDatabase, _peers: unknown) {
  const ts = Date.now();
  const mem = process.memoryUsage();
  const row = {
    ts,
    pid: process.pid,
    uptimeSec: process.uptime(),
    heapUsedMB: round2(mem.heapUsed / 1024 / 1024),
    rssMB: round2(mem.rss / 1024 / 1024),
    externalMB: round2(mem.external / 1024 / 1024),
  };
  await db.insert(workerProcessMetrics).values(row);
  // The signal payload is a copy of the persisted row.
  return { ...row };
}

View File

@ -0,0 +1,10 @@
import { integer, real, sqliteTable } from "drizzle-orm/sqlite-core";
// One row per probe of this worker process; `ts` (epoch ms) is the key.
export const workerProcessMetrics = sqliteTable("worker_process_metrics", {
  ts: integer("ts").primaryKey(),
  pid: integer("pid").notNull(),
  uptimeSec: real("uptime_sec").notNull(),
  heapUsedMB: real("heap_used_mb").notNull(),
  rssMB: real("rss_mb").notNull(),
  externalMB: real("external_mb").notNull(),
});

1
workflows/develop-sense/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
dist/

View File

@ -0,0 +1,70 @@
import type { StartStep, WorkflowDefinition } from "@uncaged/nerve-core";
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { coderPrompt } from "./roles/coder/prompt.js";
import { coderMetaSchema } from "./roles/coder/index.js";
import { plannerPrompt } from "./roles/planner/prompt.js";
import { plannerMetaSchema } from "./roles/planner/index.js";
import { reviewerPrompt } from "./roles/reviewer/prompt.js";
import { reviewerMetaSchema } from "./roles/reviewer/index.js";
import { testerPrompt } from "./roles/tester/prompt.js";
import { testerMetaSchema } from "./roles/tester/index.js";
import { buildCommitterRole } from "./roles/committer/index.js";
import { moderator } from "./moderator.js";
import type { SenseMeta } from "./moderator.js";
/** Dependencies injected into buildSenseGenerator. */
export type BuildSenseGeneratorDeps = {
  extract: LlmExtractorConfig; // LLM provider config used for meta extraction
  cwd: string;                 // Nerve workspace root
};

// Hard cap on a single cursor-agent invocation (5 minutes).
const CURSOR_TIMEOUT_MS = 300_000;
/**
 * Assemble the develop-sense workflow definition.
 *
 * Roles: planner (cursor, read-only "ask" mode — designs, does not edit),
 * coder (cursor), reviewer and tester (hermes), and a scripted committer
 * with no LLM. Routing between roles is decided by `moderator`.
 */
export function buildSenseGenerator({
  extract,
  cwd,
}: BuildSenseGeneratorDeps): WorkflowDefinition<SenseMeta> {
  const roles = {
    planner: createRole(
      createCursorAdapter({
        type: "cursor",
        mode: "ask",
        model: "auto",
        timeout: CURSOR_TIMEOUT_MS,
      }),
      async (start: StartStep) => plannerPrompt({ threadId: start.meta.threadId }),
      plannerMetaSchema,
      extract,
    ),
    coder: createRole(
      cursorAdapter,
      async (start: StartStep) => coderPrompt({ threadId: start.meta.threadId }),
      coderMetaSchema,
      extract,
    ),
    // Reviewer and tester need shell access, so they run on the hermes adapter
    // and receive the workspace root for their `cd` instructions.
    reviewer: createRole(
      hermesAdapter,
      async (start: StartStep) =>
        reviewerPrompt({ threadId: start.meta.threadId, nerveRoot: cwd }),
      reviewerMetaSchema,
      extract,
    ),
    tester: createRole(
      hermesAdapter,
      async (start: StartStep) =>
        testerPrompt({ threadId: start.meta.threadId, nerveRoot: cwd }),
      testerMetaSchema,
      extract,
    ),
    committer: buildCommitterRole({ nerveRoot: cwd }),
  };
  return {
    name: "develop-sense",
    roles,
    moderator,
  };
}

View File

@ -1,7 +1,5 @@
import { join } from "node:path";
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createDevelopSenseWorkflow } from "@uncaged/nerve-workflow-meta";
import { buildSenseGenerator } from "./build.js";
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
@ -13,19 +11,7 @@ if (!apiKey || !baseUrl) {
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL");
}
const CURSOR_TIMEOUT_MS = 300_000;
const workflow = createDevelopSenseWorkflow({
defaultAdapter: hermesAdapter,
adapters: {
planner: createCursorAdapter({
type: "cursor",
mode: "ask",
model: "auto",
timeout: CURSOR_TIMEOUT_MS,
}),
coder: cursorAdapter,
},
const workflow = buildSenseGenerator({
extract: { provider: { apiKey, baseUrl, model } },
cwd: NERVE_ROOT,
});

View File

@ -0,0 +1,65 @@
import { END } from "@uncaged/nerve-core";
import type { Moderator } from "@uncaged/nerve-core";
import type { PlannerMeta } from "./roles/planner/index.js";
import type { CoderMeta } from "./roles/coder/index.js";
import type { ReviewerMeta } from "./roles/reviewer/index.js";
import type { TesterMeta } from "./roles/tester/index.js";
import type { CommitterMeta } from "./roles/committer/index.js";
/** Per-role meta payloads — one routing signal per role. */
export type SenseMeta = {
  planner: PlannerMeta;     // { senseName }
  coder: CoderMeta;         // { filesCreated }
  reviewer: ReviewerMeta;   // { approved }
  tester: TesterMeta;       // { passed }
  committer: CommitterMeta; // { success }
};

// Retry budgets: stop looping back to the coder once either is exhausted.
const MAX_CODER_ROUNDS = 20;
const MAX_TOTAL_REJECTIONS = 10;
/** Number of coder steps completed so far in this workflow run. */
function coderRounds(steps: { role: string }[]): number {
  let rounds = 0;
  for (const step of steps) {
    if (step.role === "coder") rounds += 1;
  }
  return rounds;
}
/**
 * Count negative verdicts across reviewer/tester/committer steps.
 * Used to cap how often the workflow loops back to the coder.
 */
function totalRejections(steps: { role: string; meta: unknown }[]): number {
  const isRejection = (step: { role: string; meta: unknown }): boolean => {
    const verdict = step.meta as Record<string, boolean>;
    switch (step.role) {
      case "reviewer":
        return !verdict.approved;
      case "tester":
        return !verdict.passed;
      case "committer":
        return !verdict.success;
      default:
        return false;
    }
  };
  return steps.filter(isRejection).length;
}
/** True while both the coder-round and total-rejection budgets remain. */
function canRetryCoder(steps: { role: string; meta: unknown }[]): boolean {
  const roundsLeft = coderRounds(steps) < MAX_CODER_ROUNDS;
  const rejectionsLeft = totalRejections(steps) < MAX_TOTAL_REJECTIONS;
  return roundsLeft && rejectionsLeft;
}
/**
 * Routing: planner -> coder -> reviewer -> tester -> committer -> END.
 * Any negative verdict loops back to the coder until the retry budgets
 * (coder rounds / total rejections) are exhausted, then the run ends.
 */
export const moderator: Moderator<SenseMeta> = (context) => {
  const { steps } = context;
  if (steps.length === 0) return "planner";
  const retryOrEnd = () => (canRetryCoder(steps) ? "coder" : END);
  const last = steps[steps.length - 1];
  switch (last.role) {
    case "planner":
      return "coder";
    case "coder":
      return last.meta.filesCreated ? "reviewer" : retryOrEnd();
    case "reviewer":
      return last.meta.approved ? "tester" : retryOrEnd();
    case "tester":
      return last.meta.passed ? "committer" : retryOrEnd();
    case "committer":
      return last.meta.success ? END : retryOrEnd();
    default:
      return END;
  }
};

View File

@ -0,0 +1,21 @@
{
"name": "generate-sense-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild index.ts --bundle --platform=node --format=esm --outdir=dist --packages=external"
},
"dependencies": {
"@uncaged/nerve-adapter-cursor": "latest",
"@uncaged/nerve-adapter-hermes": "latest",
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Routing signal extracted from the coder's reply (consumed by moderator).
export const coderMetaSchema = z.object({
  filesCreated: z.boolean().describe("true if the sense files were created"),
});
export type CoderMeta = z.infer<typeof coderMetaSchema>;

View File

@ -0,0 +1,31 @@
/**
 * Prompt for the coder role.
 * Fix: the extractor parses the coder's reply with coderMetaSchema, whose
 * only field is `filesCreated` (also what the moderator reads) — but this
 * prompt previously told the coder to return `done: true/false`. The prompt
 * now asks for `filesCreated` so the instruction matches the schema.
 */
export function coderPrompt({ threadId }: { threadId: string }): string {
  return `Read the workflow thread for the planner's sense design and any tester feedback: \`nerve thread ${threadId}\`
Read the nerve-dev skill for sense file structure and conventions: \`cat node_modules/@uncaged/nerve-skills/nerve-dev/SKILL.md\`
## Your task
Implement (or fix) the sense the planner designed. If there is tester feedback in the thread, fix the issues it identified.
## Multi-step approach
You do NOT need to finish everything in one pass. You may return \`filesCreated: false\` to continue in the next iteration.
## File structure for each sense
- \`senses/<name>/src/index.ts\` — TypeScript compute source; import schema as \`./schema.ts\`
- \`senses/<name>/src/schema.ts\` — Drizzle schema (TypeScript)
- \`senses/<name>/migrations/\` — Drizzle migration files (at sense root, not inside src/)
- \`senses/<name>/package.json\` — with esbuild build script
- \`senses/<name>/index.js\` — bundled output generated by \`pnpm build\` (do NOT edit by hand)
Look at existing senses for the package.json template and patterns.
## When to return filesCreated: true
Return \`filesCreated: true\` ONLY when ALL of the following are true:
- All required files are created
- \`pnpm install --no-cache && pnpm build\` succeeds (run it!)
- \`nerve.yaml\` is updated with the sense config
Return \`filesCreated: false\` if you made progress but there is still work to do.`;
}

View File

@ -0,0 +1,91 @@
import { writeFileSync, mkdirSync } from "node:fs";
import { join } from "node:path";
import type { Role, RoleResult } from "@uncaged/nerve-core";
import { isDryRun, spawnSafe } from "@uncaged/nerve-workflow-utils";
/** Routing signal: true when both the git commit and push succeeded. */
export type CommitterMeta = {
  success: boolean;
};

/** Dependencies for the committer role. */
export type BuildCommitterDeps = {
  nerveRoot: string; // Nerve workspace root (the git repo to commit from)
};
/** Timestamped committer log file path under <nerveRoot>/logs/. */
function logPath(nerveRoot: string): string {
  const fileName = `committer-${Date.now()}.log`;
  return join(nerveRoot, "logs", fileName);
}
/** Write `content` to `path`, creating the parent directory if needed. */
function writeLog(path: string, content: string): void {
  const parentDir = join(path, "..");
  mkdirSync(parentDir, { recursive: true });
  writeFileSync(path, content, "utf-8");
}
/**
 * Scripted (no-LLM) committer role: `git add -A`, commit, push, writing a
 * per-run transcript to a log file under <nerveRoot>/logs/. Returns
 * { success } as the routing meta. In dry-run mode it only writes a stub
 * log and reports success.
 */
export function buildCommitterRole({ nerveRoot }: BuildCommitterDeps): Role<CommitterMeta> {
  return async (start, _messages) => {
    const dry = isDryRun(start);
    const file = logPath(nerveRoot);
    if (dry) {
      writeLog(file, "[dry-run] committer skipped\n");
      return {
        content: `[dry-run] committer skipped — log: ${file}`,
        meta: { success: true },
      } satisfies RoleResult<CommitterMeta>;
    }
    const lines: string[] = [];
    let success = true;
    // Run one command via spawnSafe; append its transcript to `lines` and
    // return whether it exited cleanly.
    const run = async (cmd: string, args: string[]): Promise<boolean> => {
      const r = await spawnSafe(cmd, args, {
        cwd: nerveRoot,
        env: null,
        timeoutMs: 60_000,
        dryRun: false,
        abortSignal: null,
      });
      if (r.ok) {
        lines.push(`$ ${cmd} ${args.join(" ")}`);
        if (r.value.stdout) lines.push(r.value.stdout);
        if (r.value.stderr) lines.push(r.value.stderr);
        lines.push("");
        return true;
      }
      // Failure: record the error kind plus any captured output.
      const e = r.error;
      lines.push(`$ ${cmd} ${args.join(" ")} — FAILED`);
      if (e.kind === "non_zero_exit") {
        lines.push(`exit ${e.exitCode}`);
        if (e.stdout) lines.push(e.stdout);
        if (e.stderr) lines.push(e.stderr);
      } else if (e.kind === "timeout") {
        lines.push("timeout");
        if (e.stdout) lines.push(e.stdout);
        if (e.stderr) lines.push(e.stderr);
      } else if (e.kind === "spawn_failed") {
        lines.push(e.message);
      } else {
        lines.push(e.kind === "aborted" ? "aborted" : "error");
      }
      lines.push("");
      return false;
    };
    // NOTE(review): the result of `git add` is deliberately ignored here;
    // a failed add generally surfaces as a failed commit right after —
    // confirm this is the intended best-effort behavior.
    await run("git", ["add", "-A"]);
    const committed = await run("git", ["commit", "-m", "chore(sense): auto-generated commit"]);
    if (!committed) {
      success = false;
    } else {
      const pushed = await run("git", ["push"]);
      if (!pushed) success = false;
    }
    const log = lines.join("\n");
    writeLog(file, log);
    const summary = success ? "committed and pushed" : "commit/push failed — see log";
    return {
      content: `committer: ${summary}\nLog: ${file}`,
      meta: { success },
    } satisfies RoleResult<CommitterMeta>;
  };
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Meta extracted from the planner's reply; the sense name chosen in the plan.
export const plannerMetaSchema = z.object({
  senseName: z.string().describe("kebab-case sense name from the plan"),
});
export type PlannerMeta = z.infer<typeof plannerMetaSchema>;

View File

@ -0,0 +1,17 @@
/** Prompt for the planner role: produce an implementation-ready design, not code. */
export function plannerPrompt({ threadId }: { threadId: string }): string {
  return `You are planning a new Nerve sense.
Read the workflow thread for the user's request: \`nerve thread ${threadId}\`
Read the nerve-dev skill for sense conventions: \`cat node_modules/@uncaged/nerve-skills/nerve-dev/SKILL.md\`
Also look at existing senses in the \`senses/\` directory for patterns.
Pick a good kebab-case name for this sense. Produce a PLAN (not code) in markdown:
## Sense Design
### Name kebab-case
### Fields name, type (integer/real/text), description
### Compute Logic step-by-step, specific Node.js APIs or shell commands
### Trigger Config group, interval, throttle, timeout
Output ONLY the plan. Be precise and implementation-ready.`;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Routing signal extracted from the reviewer's reply (consumed by moderator).
export const reviewerMetaSchema = z.object({
  approved: z.boolean().describe("true if the diff is clean and ready for tester validation"),
});
export type ReviewerMeta = z.infer<typeof reviewerMetaSchema>;

View File

@ -0,0 +1,38 @@
/**
 * Build the reviewer prompt (develop-sense): static git-diff analysis run
 * between the coder and the tester. The agent must end with a JSON block
 * carrying the `approved` routing boolean.
 *
 * @param threadId - Thread id for `nerve thread` context lookup.
 * @param nerveRoot - Absolute path to the Nerve workspace the agent must cd into.
 */
export function reviewerPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are a **code reviewer** for Nerve workflow changes. You run after the coder and before the tester.
**IMPORTANT: The Nerve workspace is at \`${nerveRoot}\`. Always \`cd ${nerveRoot}\` first.**
Read the workflow thread for context: \`nerve thread ${threadId}\`
Read project conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
## Your job static analysis of the git diff
Run these commands and analyze the output:
1. **\`cd ${nerveRoot} && git diff --stat\`** — see what files changed
2. **\`cd ${nerveRoot} && git diff\`** — read the actual diff
3. **\`cd ${nerveRoot} && git status --short\`** — check for untracked files
## Checklist
Review the diff against CONVENTIONS.md. Key things to catch:
### 🔴 Reject (approved: false) tell coder exactly what to fix
- **Garbage files**: anything listed under "What NOT to commit" in CONVENTIONS.md
- **Secrets/credentials**: API keys, tokens, passwords hardcoded in the diff
- **Unrelated changes**: files modified outside the scope of the task
- **Convention violations**: patterns that contradict CONVENTIONS.md (e.g. \`interface\` instead of \`type\`, \`class\`, dynamic \`import()\`, optional properties with \`?:\`)
### Approve (approved: true) no comment needed
- Diff is clean, focused, follows conventions
End with:
\`\`\`json
{ "approved": true }
\`\`\`
or
\`\`\`json
{ "approved": false }
\`\`\``;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
/** Routing meta from the tester: a single pass/fail boolean. */
export const testerMetaSchema = z.object({
  passed: z
    .boolean()
    .describe("true if all e2e checks passed"),
});

/** Inferred type of the tester's routing meta. */
export type TesterMeta = z.infer<typeof testerMetaSchema>;

View File

@ -0,0 +1,34 @@
/**
 * Build the tester prompt (develop-sense): end-to-end verification of a newly
 * created sense — files, build, config validation, trigger, and row query.
 *
 * @param threadId - Thread id for `nerve thread` context lookup.
 * @param nerveRoot - Absolute path to the Nerve workspace the agent must cd into.
 */
export function testerPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are testing a newly created Nerve sense end-to-end.
**IMPORTANT: The Nerve workspace is at \`${nerveRoot}\`. All paths below are relative to this directory. Always \`cd ${nerveRoot}\` first.**
Read the workflow thread for context: \`nerve thread ${threadId}\`
Read the nerve-dev skill for expected file structure: \`cat ${nerveRoot}/node_modules/@uncaged/nerve-skills/nerve-dev/SKILL.md\`
Verify the full lifecycle in this order:
1. **File check** all required sense files exist:
- \`senses/<name>/src/index.ts\`
- \`senses/<name>/src/schema.ts\`
- \`senses/<name>/migrations/\`
- \`senses/<name>/package.json\`
2. **Build** run inside the sense directory:
\`\`\`
cd ${nerveRoot}/senses/<name> && pnpm install --no-cache && pnpm build
\`\`\`
Must produce \`index.js\` at sense root without errors.
3. **Config check** \`nerve validate\` passes, confirming nerve.yaml is valid.
4. **Sense list** \`nerve sense list\` shows the sense.
5. **Trigger** \`nerve sense trigger <name>\` completes without error.
6. **Query** \`nerve sense query <name>\` — retry up to 20s until rows appear.
If any step fails, include the relevant error output.
Output a clear summary: what you checked, what passed, what failed, and why.`;
}

View File

@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
"skipLibCheck": true,
"noEmit": false,
"declaration": false,
"types": ["node"]
},
"include": ["./**/*.ts"]
}

1
workflows/develop-workflow/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
dist/

View File

@ -0,0 +1,70 @@
import type { StartStep, WorkflowDefinition } from "@uncaged/nerve-core";
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { coderPrompt } from "./roles/coder/prompt.js";
import { coderMetaSchema } from "./roles/coder/index.js";
import { plannerPrompt } from "./roles/planner/prompt.js";
import { plannerMetaSchema } from "./roles/planner/index.js";
import { reviewerPrompt } from "./roles/reviewer/prompt.js";
import { reviewerMetaSchema } from "./roles/reviewer/index.js";
import { testerPrompt } from "./roles/tester/prompt.js";
import { testerMetaSchema } from "./roles/tester/index.js";
import { buildCommitterRole } from "./roles/committer/index.js";
import { moderator } from "./moderator.js";
import type { WorkflowMeta } from "./moderator.js";
/** Dependencies injected into the develop-workflow factory. */
export type BuildWorkflowGeneratorDeps = {
  extract: LlmExtractorConfig;
  nerveRoot: string;
};

// Hard cap on a single cursor-agent invocation (5 minutes).
const CURSOR_TIMEOUT_MS = 300_000;

/**
 * Assemble the develop-workflow definition: planner (cursor, ask mode),
 * coder (cursor), reviewer and tester (hermes), plus the scripted committer.
 */
export function buildWorkflowGenerator({
  extract,
  nerveRoot,
}: BuildWorkflowGeneratorDeps): WorkflowDefinition<WorkflowMeta> {
  // Planner runs in read-only "ask" mode — it designs, it does not edit files.
  const plannerAdapter = createCursorAdapter({
    type: "cursor",
    mode: "ask",
    model: "auto",
    timeout: CURSOR_TIMEOUT_MS,
  });

  const planner = createRole(
    plannerAdapter,
    async (start: StartStep) => plannerPrompt({ threadId: start.meta.threadId }),
    plannerMetaSchema,
    extract,
  );
  const coder = createRole(
    cursorAdapter,
    async (start: StartStep) => coderPrompt({ threadId: start.meta.threadId }),
    coderMetaSchema,
    extract,
  );
  const reviewer = createRole(
    hermesAdapter,
    async (start: StartStep) =>
      reviewerPrompt({ threadId: start.meta.threadId, nerveRoot }),
    reviewerMetaSchema,
    extract,
  );
  const tester = createRole(
    hermesAdapter,
    async (start: StartStep) =>
      testerPrompt({ threadId: start.meta.threadId, nerveRoot }),
    testerMetaSchema,
    extract,
  );
  const committer = buildCommitterRole({ nerveRoot });

  return {
    name: "develop-workflow",
    roles: { planner, coder, reviewer, tester, committer },
    moderator,
  };
}

View File

@ -1,7 +1,5 @@
import { join } from "node:path";
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createDevelopWorkflowWorkflow } from "@uncaged/nerve-workflow-meta";
import { buildWorkflowGenerator } from "./build.js";
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
@ -14,19 +12,7 @@ if (!apiKey || !baseUrl) {
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL");
}
const CURSOR_TIMEOUT_MS = 300_000;
const workflow = createDevelopWorkflowWorkflow({
defaultAdapter: hermesAdapter,
adapters: {
planner: createCursorAdapter({
type: "cursor",
mode: "ask",
model: "auto",
timeout: CURSOR_TIMEOUT_MS,
}),
coder: cursorAdapter,
},
const workflow = buildWorkflowGenerator({
extract: { provider: { apiKey, baseUrl, model } },
nerveRoot: NERVE_ROOT,
});

View File

@ -0,0 +1,67 @@
import { END } from "@uncaged/nerve-core";
import type { Moderator } from "@uncaged/nerve-core";
import type { PlannerMeta } from "./roles/planner/index.js";
import type { CoderMeta } from "./roles/coder/index.js";
import type { ReviewerMeta } from "./roles/reviewer/index.js";
import type { TesterMeta } from "./roles/tester/index.js";
import type { CommitterMeta } from "./roles/committer/index.js";
/** Per-role routing meta map for the develop-workflow moderator. */
export type WorkflowMeta = {
  planner: PlannerMeta;
  coder: CoderMeta;
  reviewer: ReviewerMeta;
  tester: TesterMeta;
  committer: CommitterMeta;
};

// Loop guards to stop runaway iteration between coder and its validators.
const MAX_CODER_ROUNDS = 20;
const MAX_TOTAL_REJECTIONS = 10;

/** Number of coder invocations so far in this thread. */
function coderRounds(steps: { role: string }[]): number {
  let count = 0;
  for (const step of steps) {
    if (step.role === "coder") count += 1;
  }
  return count;
}

/** Rejections across reviewer (!approved), tester (!passed) and committer (!success). */
function totalRejections(steps: { role: string; meta: unknown }[]): number {
  let count = 0;
  for (const step of steps) {
    const flags = step.meta as Record<string, boolean>;
    if (step.role === "reviewer" && !flags.approved) count += 1;
    else if (step.role === "tester" && !flags.passed) count += 1;
    else if (step.role === "committer" && !flags.success) count += 1;
  }
  return count;
}

/** The coder may run again only while both loop guards have headroom. */
function canRetryCoder(steps: { role: string; meta: unknown }[]): boolean {
  return coderRounds(steps) < MAX_CODER_ROUNDS && totalRejections(steps) < MAX_TOTAL_REJECTIONS;
}

/**
 * Route the next role: planner → coder → reviewer → tester → committer,
 * with bounded retry loops back to the coder on any rejection.
 */
export const moderator: Moderator<WorkflowMeta> = (context) => {
  if (context.steps.length === 0) return "planner";
  const last = context.steps[context.steps.length - 1];
  const retryOrEnd = () => (canRetryCoder(context.steps) ? "coder" : END);
  switch (last.role) {
    case "planner":
      // An unready plan ends the run — there is nothing for the coder to do.
      return last.meta.ready ? "coder" : END;
    case "coder":
      return last.meta.done ? "reviewer" : retryOrEnd();
    case "reviewer":
      return last.meta.approved ? "tester" : retryOrEnd();
    case "tester":
      return last.meta.passed ? "committer" : retryOrEnd();
    case "committer":
      return last.meta.success ? END : retryOrEnd();
    default:
      return END;
  }
};

View File

@ -0,0 +1,21 @@
{
"name": "generate-workflow-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild index.ts --bundle --platform=node --format=esm --outdir=dist --packages=external"
},
"dependencies": {
"@uncaged/nerve-adapter-cursor": "latest",
"@uncaged/nerve-adapter-hermes": "latest",
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
/** Routing meta from the coder: done=true once files exist and the build passes. */
export const coderMetaSchema = z.object({
  done: z
    .boolean()
    .describe("true if the workflow files were created and build passes"),
});

/** Inferred type of the coder's routing meta. */
export type CoderMeta = z.infer<typeof coderMetaSchema>;

View File

@ -0,0 +1,51 @@
/**
 * Build the coder prompt (develop-workflow): implement the planner's design,
 * possibly over several iterations (done=false continues the loop).
 *
 * @param threadId - Thread id for `nerve thread` context lookup.
 */
export function coderPrompt({ threadId }: { threadId: string }): string {
  return `Read the workflow thread to get the planner's design and any reviewer/tester/committer feedback: \`nerve thread ${threadId}\`
Read the nerve-dev skill for workflow file structure and conventions: \`cat node_modules/@uncaged/nerve-skills/nerve-dev/SKILL.md\`
Also look at existing workflows in the \`workflows/\` directory for patterns.
## Your task
Implement the planner's design. This may be **creating a new workflow** or **modifying an existing one**. If there is reviewer, tester, or committer feedback in the thread, fix the issues they identified.
**IMPORTANT:** The thread contains both the **initial user prompt** (the first message) and the **planner's design**. Read both carefully:
- The **initial prompt** contains the user's specific requirements for role behavior, tools to use, and acceptance criteria
- The **planner's design** contains the architecture, file structure, and routing logic
- When writing role prompts, follow the user's behavioral requirements from the initial prompt do not invent your own interpretation
## Multi-step approach
You do NOT need to finish everything in one pass. You may return \`done: false\` to continue in the next iteration. For example:
1. First pass: scaffold files / make structural changes
2. Second pass: implement role logic
3. Third pass: fix build/lint errors
## Workflow file structure
Each workflow must have:
- \`workflows/<name>/index.ts\` — WorkflowDefinition default export
- \`workflows/<name>/build.ts\` — factory function
- \`workflows/<name>/moderator.ts\` — moderator + meta types
- \`workflows/<name>/roles/<role>/index.ts\` — role build function
- \`workflows/<name>/roles/<role>/prompt.ts\` — prompt pure function
- \`workflows/<name>/package.json\` — with esbuild build script
- \`workflows/<name>/tsconfig.json\` — TypeScript config
For **new workflows**, also update \`nerve.yaml\` with \`workflows.<name>\`.
## Rules
- Keep the WorkflowDefinition<WorkflowMeta> pattern
- No dynamic import()
- Use types (not interfaces)
- Meta should be simple routing signals (single boolean per role)
- Write compile-ready TypeScript
## When to return done: true
Return \`done: true\` ONLY when ALL of the following are true:
- All changes from the plan are implemented
- \`cd workflows/<name> && pnpm install --no-cache && pnpm build\` succeeds (run it!)
- No lint or type errors remain
Return \`done: false\` if you made progress but there is still work to do, or if build/lint has errors you plan to fix in the next iteration.`;
}

View File

@ -0,0 +1,92 @@
import { writeFileSync, mkdirSync } from "node:fs";
import { join } from "node:path";
import type { Role, RoleResult } from "@uncaged/nerve-core";
import { isDryRun, spawnSafe } from "@uncaged/nerve-workflow-utils";
/** Routing meta for the committer: true when commit and push both succeeded. */
export type CommitterMeta = {
  success: boolean;
};

/** Dependencies for building the scripted committer role. */
export type BuildCommitterDeps = {
  nerveRoot: string;
};

/** Absolute path of a fresh, timestamped committer log file under <nerveRoot>/logs. */
function logPath(nerveRoot: string): string {
  const stamp = Date.now();
  return join(nerveRoot, "logs", "committer-" + stamp + ".log");
}
/**
 * Write `content` to `path`, creating the parent directory first.
 *
 * Fix: use dirname(path) instead of join(path, "..") — dirname computes the
 * parent lexically and is the idiomatic, unambiguous way to get the containing
 * directory of a file path.
 */
function writeLog(path: string, content: string): void {
  mkdirSync(dirname(path), { recursive: true });
  writeFileSync(path, content, "utf-8");
}
/**
 * Build the scripted committer role: `git add -A`, commit with a generic
 * message, then push. No LLM involved — pure shell logic. Every command's
 * output is appended to a timestamped log file so failures can be inspected.
 * Meta `success` is the single routing boolean consumed by the moderator.
 */
export function buildCommitterRole({ nerveRoot }: BuildCommitterDeps): Role<CommitterMeta> {
  return async (start, _messages) => {
    const dry = isDryRun(start);
    const file = logPath(nerveRoot);
    // Dry-run: report success without touching git, so workflows can be rehearsed.
    if (dry) {
      writeLog(file, "[dry-run] committer skipped\n");
      return {
        content: `[dry-run] committer skipped — log: ${file}`,
        meta: { success: true },
      } satisfies RoleResult<CommitterMeta>;
    }
    const lines: string[] = [];
    let success = true;
    // Run one git command in nerveRoot, appending stdout/stderr (or the
    // failure detail) to the log. Returns false on any kind of failure.
    const run = async (cmd: string, args: string[]): Promise<boolean> => {
      const r = await spawnSafe(cmd, args, {
        cwd: nerveRoot,
        env: null,
        timeoutMs: 60_000,
        dryRun: false,
        abortSignal: null,
      });
      if (r.ok) {
        lines.push(`$ ${cmd} ${args.join(" ")}`);
        if (r.value.stdout) lines.push(r.value.stdout);
        if (r.value.stderr) lines.push(r.value.stderr);
        lines.push("");
        return true;
      }
      // Failure path: record which command failed and why (exit code,
      // timeout, spawn error, or abort).
      const e = r.error;
      lines.push(`$ ${cmd} ${args.join(" ")} — FAILED`);
      if (e.kind === "non_zero_exit") {
        lines.push(`exit ${e.exitCode}`);
        if (e.stdout) lines.push(e.stdout);
        if (e.stderr) lines.push(e.stderr);
      } else if (e.kind === "timeout") {
        lines.push("timeout");
        if (e.stdout) lines.push(e.stdout);
        if (e.stderr) lines.push(e.stderr);
      } else if (e.kind === "spawn_failed") {
        lines.push(e.message);
      } else {
        lines.push(e.kind === "aborted" ? "aborted" : "error");
      }
      lines.push("");
      return false;
    };
    // `git add -A` result is intentionally ignored; commit will surface problems.
    await run("git", ["add", "-A"]);
    // Use a generic message; git diff will show what actually changed
    // NOTE(review): `git commit` exits non-zero when there is nothing to
    // commit, which is reported here as failure — confirm that is intended.
    const committed = await run("git", ["commit", "-m", "chore(workflow): auto-generated commit"]);
    if (!committed) {
      success = false;
    } else {
      // Only push when the commit succeeded.
      const pushed = await run("git", ["push"]);
      if (!pushed) success = false;
    }
    const log = lines.join("\n");
    writeLog(file, log);
    const summary = success ? "committed and pushed" : "commit/push failed — see log";
    return {
      content: `committer: ${summary}\nLog: ${file}`,
      meta: { success },
    } satisfies RoleResult<CommitterMeta>;
  };
}

View File

@ -0,0 +1,35 @@
/** Inputs for the committer subagent prompt. */
export type CommitterPromptParams = {
  nerveRoot: string;
  workflowName: string;
  userPrompt: string;
  testerReason: string;
};

/**
 * Render the instructions for a git-committer subagent: commit and push the
 * generated workflow on a focused branch and report BRANCH/COMMIT/PUSHED plus
 * a log section in a machine-parseable format.
 */
export function committerPrompt(params: CommitterPromptParams): string {
  const { nerveRoot, workflowName, testerReason } = params;
  // Cap the user prompt at 500 characters to keep the subagent context small.
  const userPromptSummary = params.userPrompt.slice(0, 500);
  return `You are a git committer subagent for Nerve workflow generation.
Repository root: ${nerveRoot}
Goal:
- Commit and push generated workflow "${workflowName}".
- Handle dirty worktree safely (do not discard unrelated user edits).
- Detect default branch automatically.
- Create a focused branch for this workflow update.
- Stage only workflow files and required config updates.
Context:
- User prompt summary: ${userPromptSummary}
- Tester result: ${testerReason}
Expected output format:
BRANCH=<branch-or-empty>
COMMIT=<hash-or-empty>
PUSHED=<true|false|unknown>
LOG_START
<details>
LOG_END`;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
/** Routing meta from the workflow planner: ready=true when the plan is implementable. */
export const plannerMetaSchema = z.object({
  ready: z
    .boolean()
    .describe("true if requirements are clear and a workflow can be implemented"),
});

/** Inferred type of the planner's routing meta. */
export type PlannerMeta = z.infer<typeof plannerMetaSchema>;

View File

@ -0,0 +1,46 @@
/**
 * Build the planner prompt (develop-workflow): design a new workflow or plan
 * changes to an existing one, ending with a JSON `ready` routing boolean.
 *
 * @param threadId - Thread id for `nerve thread` context lookup.
 */
export function plannerPrompt({ threadId }: { threadId: string }): string {
  return `You are a Nerve workflow planner. You can **create new workflows** or **modify existing ones**.
Read the workflow thread for the user's request: \`nerve thread ${threadId}\`
Read the nerve-dev skill for workflow conventions: \`cat node_modules/@uncaged/nerve-skills/nerve-dev/SKILL.md\`
List existing workflows: \`ls workflows/\`
## Determine the task type
1. If the user wants to **modify an existing workflow** read its current code (\`cat workflows/<name>/moderator.ts\`, \`cat workflows/<name>/build.ts\`, \`ls workflows/<name>/roles/\`, etc.) and understand its current structure before planning changes.
2. If the user wants to **create a new workflow** look at existing workflows in \`workflows/\` for patterns to follow.
## Produce a PLAN (not code) in markdown
For **new workflows**:
- Workflow name (kebab-case)
- Roles list (name, purpose, tool)
- Flow transitions / moderator routing logic
- Validation loops design
- External dependencies
- Data flow between roles
For **modifications to existing workflows**:
- Workflow name (existing)
- What changes are needed and why
- Files to add/modify/delete
- Impact on moderator routing logic (this workflow's typical order is planner coder reviewer tester committer)
- Backward compatibility considerations (if any)
**For every role (new or modified)**, include a **Role Behavior** section that describes:
- What the role should do, check, or produce
- What tools or commands it should use
- What criteria determine its meta output (e.g. approved/passed/done)
- Preserve the user's specific requirements verbatim do NOT summarize away details
If requirements are NOT clear, describe what is missing or ambiguous.
End your response with a JSON block:
\`\`\`json
{ "ready": true }
\`\`\`
or
\`\`\`json
{ "ready": false }
\`\`\``;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
/** Routing meta from the reviewer: a single approve/reject boolean. */
export const reviewerMetaSchema = z.object({
  approved: z
    .boolean()
    .describe("true if the diff is clean and ready for tester validation"),
});

/** Inferred type of the reviewer's routing meta. */
export type ReviewerMeta = z.infer<typeof reviewerMetaSchema>;

View File

@ -0,0 +1,38 @@
/**
 * Build the reviewer prompt (develop-workflow): static git-diff analysis run
 * between the coder and the tester. The agent must end with a JSON block
 * carrying the `approved` routing boolean.
 *
 * @param threadId - Thread id for `nerve thread` context lookup.
 * @param nerveRoot - Absolute path to the Nerve workspace the agent must cd into.
 */
export function reviewerPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are a **code reviewer** for Nerve workflow changes. You run after the coder and before the tester.
**IMPORTANT: The Nerve workspace is at \`${nerveRoot}\`. Always \`cd ${nerveRoot}\` first.**
Read the workflow thread for context: \`nerve thread ${threadId}\`
Read project conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
## Your job static analysis of the git diff
Run these commands and analyze the output:
1. **\`cd ${nerveRoot} && git diff --stat\`** — see what files changed
2. **\`cd ${nerveRoot} && git diff\`** — read the actual diff
3. **\`cd ${nerveRoot} && git status --short\`** — check for untracked files
## Checklist
Review the diff against CONVENTIONS.md. Key things to catch:
### 🔴 Reject (approved: false) tell coder exactly what to fix
- **Garbage files**: anything listed under "What NOT to commit" in CONVENTIONS.md
- **Secrets/credentials**: API keys, tokens, passwords hardcoded in the diff
- **Unrelated changes**: files modified outside the scope of the task
- **Convention violations**: patterns that contradict CONVENTIONS.md (e.g. \`interface\` instead of \`type\`, \`class\`, dynamic \`import()\`, optional properties with \`?:\`)
### Approve (approved: true) no comment needed
- Diff is clean, focused, follows conventions
End with:
\`\`\`json
{ "approved": true }
\`\`\`
or
\`\`\`json
{ "approved": false }
\`\`\``;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
/** Routing meta from the tester: a single pass/fail boolean. */
export const testerMetaSchema = z.object({
  passed: z
    .boolean()
    .describe("true if all validation checks passed"),
});

/** Inferred type of the tester's routing meta. */
export type TesterMeta = z.infer<typeof testerMetaSchema>;

View File

@ -0,0 +1,35 @@
/**
 * Build the tester prompt (develop-workflow): verify a created or modified
 * workflow — required files, build, config validation, and registration.
 *
 * @param threadId - Thread id for `nerve thread` context lookup.
 * @param nerveRoot - Absolute path to the Nerve workspace the agent must cd into.
 */
export function testerPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are testing a Nerve workflow — either newly created or recently modified.
**IMPORTANT: The Nerve workspace is at \`${nerveRoot}\`. All paths below are relative to this directory. Always \`cd ${nerveRoot}\` first.**
Read the workflow thread for context: \`nerve thread ${threadId}\`
Read the nerve-dev skill for expected file structure: \`cat ${nerveRoot}/node_modules/@uncaged/nerve-skills/nerve-dev/SKILL.md\`
Get the workflow name from the thread (the planner's output).
Verify the full lifecycle in this order:
1. **File check** all required workflow files exist (under \`${nerveRoot}/\`):
- \`workflows/<name>/index.ts\`
- \`workflows/<name>/build.ts\`
- \`workflows/<name>/moderator.ts\`
- \`workflows/<name>/roles/\` with subdirectories
- \`workflows/<name>/package.json\`
2. **Build** run inside the workflow directory:
\`\`\`
cd ${nerveRoot}/workflows/<name> && pnpm install --no-cache && pnpm build
\`\`\`
Must produce \`dist/index.js\` without errors.
3. **Config check** \`cd ${nerveRoot} && nerve validate\` passes, confirming nerve.yaml is valid.
4. **Workflow list** \`nerve workflow list\` shows the workflow.
5. **Trigger test** \`nerve workflow trigger <name> --dry-run\` if available, otherwise just confirm the workflow appears in \`nerve workflow status\`.
If any step fails, include the relevant error output.
Output a clear summary: what you checked, what passed, what failed, and why.`;
}

View File

@ -7,13 +7,7 @@
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"allowImportingTsExtensions": true,
"types": ["node"]
},
"include": [
"senses/**/*.ts",
"workflows/**/*.ts",
"scripts/**/*.ts",
"workflows/_shared/**/*.ts"
]
"include": ["./**/*.ts"]
}

View File

@ -1,33 +0,0 @@
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createLlmAdapter } from "@uncaged/nerve-workflow-utils";
import { moderator } from "./moderator.js";
import type { WorkflowMeta } from "./moderator.js";
import { createAnswererRole } from "./roles/answerer.js";
import { createExplorerRole } from "./roles/explorer.js";
import { createQuestionerRole } from "./roles/questioner.js";
/** Dependencies for the extract-knowledge workflow factory. */
// NOTE(review): optional `?:` properties conflict with the CONVENTIONS.md rule
// quoted by the reviewer prompts — confirm whether this module predated it.
export type CreateKnowledgeExtractionDeps = {
  defaultAdapter: AgentFn;
  adapters?: Partial<Record<keyof WorkflowMeta, AgentFn>>;
  extract: LlmExtractorConfig;
};

/**
 * Assemble the extract-knowledge workflow (questioner / answerer / explorer).
 *
 * questioner and answerer default to a dedicated LLM adapter built from
 * `extract.provider`; the explorer falls back to `defaultAdapter`. Any role
 * can be overridden via `adapters`.
 */
export function createKnowledgeExtractionWorkflow({
  defaultAdapter,
  adapters,
  extract,
}: CreateKnowledgeExtractionDeps): WorkflowDefinition<WorkflowMeta> {
  // Per-role adapter override with fall-through to the workflow default.
  const a = (role: keyof WorkflowMeta) => adapters?.[role] ?? defaultAdapter;
  const llmAdapter = createLlmAdapter(extract.provider);
  return {
    name: "extract-knowledge",
    roles: {
      questioner: createQuestionerRole(adapters?.questioner ?? llmAdapter, { extract }),
      answerer: createAnswererRole(adapters?.answerer ?? llmAdapter, { extract }),
      explorer: createExplorerRole(a("explorer"), { extract }),
    },
    moderator,
  };
}

View File

@ -1,30 +0,0 @@
import { join } from "node:path";
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createKnowledgeExtractionWorkflow } from "./build.js";
import { resolveDashScopeProvider } from "../solve-issue/lib/provider.js";
// Resolve the Nerve workspace under the current user's home directory;
// the fallback matches the Azure VM image this deployment targets.
const HOME = process.env.HOME ?? "/home/azureuser";
const NERVE_ROOT = join(HOME, ".uncaged-nerve");

// DashScope credentials are resolved from the environment (or cfg store);
// fail fast at module load when they are missing.
const provider = await resolveDashScopeProvider(NERVE_ROOT);
if (provider === null) {
  throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or cfg get equivalents)");
}

// Hard cap on a single cursor-agent invocation (5 minutes).
const CURSOR_TIMEOUT_MS = 300_000;

// Only the explorer gets a cursor adapter override here; the factory gives
// questioner/answerer an LLM adapter built from `extract.provider`.
const workflow = createKnowledgeExtractionWorkflow({
  defaultAdapter: hermesAdapter,
  adapters: {
    explorer: createCursorAdapter({
      type: "cursor",
      model: "claude-sonnet-4",
      timeout: CURSOR_TIMEOUT_MS,
    }),
  },
  extract: { provider },
});

export default workflow;

View File

@ -1,74 +0,0 @@
import type { Dirent } from "node:fs";
import { readdir } from "node:fs/promises";
import { join } from "node:path";
import type { StartStep, WorkflowMessage } from "@uncaged/nerve-core";
import type { ExplorerMeta } from "../roles/explorer.js";
import type { QuestionerMeta } from "../roles/questioner.js";
/**
 * Recursively collect repo-relative paths of every `.md` file under rootDir.
 * An unreadable directory is treated as empty rather than raising.
 */
async function walkMarkdownFiles(rootDir: string, base: string): Promise<string[]> {
  let entries: Dirent[];
  try {
    entries = (await readdir(rootDir, { withFileTypes: true })) as Dirent[];
  } catch {
    return [];
  }
  const found: string[] = [];
  for (const entry of entries) {
    const rel = base ? `${base}/${entry.name}` : entry.name;
    const abs = join(rootDir, entry.name);
    if (entry.isDirectory()) {
      found.push(...(await walkMarkdownFiles(abs, rel)));
    } else if (entry.isFile() && entry.name.endsWith(".md")) {
      // Normalise Windows separators to forward slashes.
      found.push(rel.replace(/\\/g, "/"));
    }
  }
  return found;
}

/** Enumerate all markdown files under `.knowledge/` as repo-relative paths; seed line first if present. */
export async function bootstrapKnowledgeQueue(cwd: string, startContent: string): Promise<string[]> {
  const relFiles = await walkMarkdownFiles(join(cwd, ".knowledge"), "");
  const paths = relFiles.map((f) => `.knowledge/${f}`);
  // The seed is the first non-empty line of the start prompt.
  const seed = startContent.trim().split(/\r?\n/u)[0]?.trim() ?? "";
  if (seed.length > 0) {
    // A seed always comes first; a duplicate of it in the listing is dropped.
    return [seed, ...paths.filter((p) => p !== seed)];
  }
  // No seed: deterministic alphabetical order.
  return [...paths].sort();
}
/** Index of the most recent message with the given role, or -1 when absent. */
function lastIndexOfRole(messages: WorkflowMessage[], role: string): number {
  let idx = messages.length - 1;
  while (idx >= 0) {
    if (messages[idx].role === role) {
      return idx;
    }
    idx -= 1;
  }
  return -1;
}
/**
 * Next queue for questioner: bootstrap, or continue after answerer / explorer.
 *
 * If no questioner has run yet, the queue is bootstrapped from `.knowledge/`.
 * Otherwise the last questioner's remaining queue is used, extended with any
 * new cards reported by an explorer step that ran after that questioner.
 */
export async function resolveQueueForQuestioner(
  start: StartStep,
  messages: WorkflowMessage[],
  cwd: string,
): Promise<string[]> {
  const lastQi = lastIndexOfRole(messages, "questioner");
  // First questioner run: seed the queue from disk and the start prompt.
  if (lastQi === -1) {
    return bootstrapKnowledgeQueue(cwd, start.content);
  }
  const qMeta = messages[lastQi].meta as QuestionerMeta;
  // Only explorer steps AFTER the last questioner contribute new cards.
  const tail = messages.slice(lastQi + 1);
  const explorerMsg = tail.find((m) => m.role === "explorer");
  if (explorerMsg) {
    const eMeta = explorerMsg.meta as ExplorerMeta;
    return [...qMeta.remaining_queue, ...eMeta.new_cards];
  }
  return qMeta.remaining_queue;
}

View File

@ -1,21 +0,0 @@
import type { StartStep } from "@uncaged/nerve-core";
type StartMetaWithWorkdir = StartStep["meta"] & { workdir?: string | null };

/**
 * Resolve the target repo working directory.
 * Priority: start.meta.workdir, then the prompt's second line (when it is an
 * absolute path), then process.cwd().
 */
export function resolveWorkdir(start: StartStep): string {
  const meta = start.meta as StartMetaWithWorkdir;
  if (meta.workdir) {
    return meta.workdir;
  }
  // The prompt may carry a workdir on its second line: "seed\n/abs/path".
  const secondLine = start.content.split(/\r?\n/)[1]?.trim() ?? "";
  if (secondLine.startsWith("/")) {
    return secondLine;
  }
  return process.cwd();
}

View File

@ -1,84 +0,0 @@
import { END } from "@uncaged/nerve-core";
import type { Moderator, ThreadContext } from "@uncaged/nerve-core";
import type { AnswererMeta } from "./roles/answerer.js";
import type { ExplorerMeta } from "./roles/explorer.js";
import type { QuestionerMeta } from "./roles/questioner.js";
/** Per-role meta map for the extract-knowledge workflow. */
export type WorkflowMeta = {
  questioner: QuestionerMeta;
  answerer: AnswererMeta;
  explorer: ExplorerMeta;
};

type Steps = ThreadContext<WorkflowMeta>["steps"];

/** Meta of the most recent questioner step, or undefined when none has run. */
function lastQuestionerRemaining(steps: Steps): QuestionerMeta | undefined {
  const newestFirst = [...steps].reverse();
  for (const step of newestFirst) {
    if (step.role === "questioner") {
      return step.meta;
    }
  }
  return undefined;
}
/** End when the last two explorer invocations both added no new cards (issue #266 stagnation rule). */
function lastTwoExplorerRunsBothEmpty(steps: Steps): boolean {
  const explorerMetas = steps
    .filter((s) => s.role === "explorer")
    .map((s) => s.meta as ExplorerMeta);
  if (explorerMetas.length < 2) return false;
  return explorerMetas.slice(-2).every((m) => m.new_cards.length === 0);
}
/** Remaining queue from the latest questioner when the explorer was skipped. */
function queueAfterSkippedExplorer(steps: Steps): string[] {
  return lastQuestionerRemaining(steps)?.remaining_queue ?? [];
}

/** Queue after an explorer step: questioner remainder plus newly created cards. */
function queueAfterExplorerStep(steps: Steps): string[] {
  const last = steps[steps.length - 1];
  if (!last || last.role !== "explorer") return [];
  const questionerMeta = lastQuestionerRemaining(steps);
  if (!questionerMeta) return [];
  const explorerMeta = last.meta as ExplorerMeta;
  return [...questionerMeta.remaining_queue, ...explorerMeta.new_cards];
}
/**
 * Route the extract-knowledge workflow: questioner → answerer → (explorer)?,
 * looping back to the questioner while the queue is non-empty. Ends on an
 * empty queue or when the explorer stagnates (two empty runs in a row).
 */
export const moderator: Moderator<WorkflowMeta> = (context) => {
  const { steps } = context;
  // Fresh thread: start by generating questions.
  if (steps.length === 0) {
    return "questioner";
  }
  const last = steps[steps.length - 1];
  // Questions always go to the answerer next.
  if (last.role === "questioner") {
    return "answerer";
  }
  if (last.role === "answerer") {
    const am = last.meta as AnswererMeta;
    // Any unanswered question sends the run to the explorer to fill gaps.
    if (am.has_unanswered) {
      return "explorer";
    }
    // Everything answered: continue with the remaining queue, or finish.
    const q = queueAfterSkippedExplorer(steps);
    if (q.length === 0) {
      return END;
    }
    return "questioner";
  }
  if (last.role === "explorer") {
    // Stagnation rule: two consecutive explorer runs with no new cards.
    if (lastTwoExplorerRunsBothEmpty(steps)) {
      return END;
    }
    const q = queueAfterExplorerStep(steps);
    if (q.length === 0) {
      return END;
    }
    return "questioner";
  }
  // Unknown role: fail closed by ending the workflow.
  return END;
};

View File

@ -1,102 +0,0 @@
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, nerveCommandEnv, spawnSafe } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveWorkdir } from "../lib/workdir.js";
import type { QuestionerMeta } from "./questioner.js";
/**
 * Routing meta from the answerer: one lookup result per question id, plus an
 * aggregate flag the moderator uses to decide whether the explorer must run.
 */
export const answererMetaSchema = z.object({
  results: z.array(
    z.object({
      id: z.string(),
      found: z.boolean(),
      source: z.string(),
      note: z.string(),
    }),
  ),
  has_unanswered: z.boolean(),
});
export type AnswererMeta = z.infer<typeof answererMetaSchema>;

/** Dependencies for building the answerer role. */
export type CreateAnswererRoleDeps = {
  extract: LlmExtractorConfig;
};
/** Meta of the newest questioner message, or undefined when none exists. */
function lastQuestionerMeta(messages: WorkflowMessage[]): QuestionerMeta | undefined {
  for (let i = messages.length - 1; i >= 0; i -= 1) {
    const message = messages[i];
    if (message.role === "questioner") {
      return message.meta as QuestionerMeta;
    }
  }
  return undefined;
}
/**
 * Build the answerer prompt by running `nerve knowledge query` for every
 * pending question and embedding the retrieval excerpts.
 *
 * The answerer must judge questions ONLY from these excerpts, never from
 * source code. Queries run sequentially — each is a CLI subprocess with a
 * 2-minute timeout.
 *
 * @throws Error when invoked without questioner questions; the wrapping role
 *   in createAnswererRole is expected to short-circuit before reaching here.
 */
export async function answererPrompt(ctx: ThreadContext): Promise<string> {
  const messages = ctx.steps as unknown as WorkflowMessage[];
  const cwd = resolveWorkdir(ctx.start);
  const qm = lastQuestionerMeta(messages);
  if (!qm || qm.questions.length === 0) {
    throw new Error("answerer: prompt invoked without questioner questions — wrapped role should short-circuit");
  }
  const blocks: string[] = [];
  for (const q of qm.questions) {
    // Dry-run: note the skip instead of spawning the CLI.
    if ((ctx.start.meta as Record<string, unknown>).dryRun) {
      blocks.push(`### ${q.id}\n[dryRun] skipped nerve knowledge query\n`);
      continue;
    }
    const res = await spawnSafe(
      "nerve",
      ["knowledge", "query", q.question],
      {
        cwd,
        env: nerveCommandEnv(),
        timeoutMs: 120_000,
        dryRun: false,
        abortSignal: null,
      },
    );
    if (res.ok) {
      blocks.push(`### ${q.id} (${q.domain})\nQuestion: ${q.question}\n---\n${res.value.stdout}\n`);
    } else {
      // Failed queries are still surfaced so the answerer can mark them unanswered.
      const err = res.error;
      const detail =
        err.kind === "non_zero_exit"
          ? `exit ${err.exitCode}\n${err.stderr}`
          : err.kind === "timeout"
            ? `timeout\n${err.stderr}`
            : err.kind === "spawn_failed"
              ? err.message
              : "aborted";
      blocks.push(`### ${q.id}\nnerve knowledge query failed: ${detail}\n`);
    }
  }
  return [
    "You are the **answerer**. You MUST NOT read repository source code — only the CLI retrieval excerpts below.",
    "For each question id, decide whether the knowledge base already answers it.",
    "Set found=true only when the excerpt supports a confident answer; otherwise found=false.",
    "Set has_unanswered=true if any question remains unanswered by the knowledge base.",
    "",
    ...blocks,
  ].join("\n");
}
/**
 * Build the answerer role: wraps the generic LLM role so that, when the
 * questioner produced no questions, the role short-circuits with an empty
 * result instead of spawning CLI lookups or invoking the LLM.
 */
export function createAnswererRole(adapter: AgentFn, { extract }: CreateAnswererRoleDeps): Role<AnswererMeta> {
  const inner = createRole(adapter, answererPrompt, answererMetaSchema, extract);
  return async (ctx: ThreadContext) => {
    const messages = ctx.steps as unknown as WorkflowMessage[];
    const qm = lastQuestionerMeta(messages);
    // No questions pending: report "nothing unanswered" so routing can proceed.
    if (!qm || qm.questions.length === 0) {
      return {
        content: "answerer: no questions from questioner; skipping CLI lookup.",
        meta: { results: [], has_unanswered: false },
      };
    }
    return inner(ctx);
  };
}

View File

@ -1,93 +0,0 @@
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveWorkdir } from "../lib/workdir.js";
import type { AnswererMeta } from "./answerer.js";
import type { QuestionerMeta } from "./questioner.js";
// Meta extracted from the explorer step:
// - patches: existing knowledge cards the explorer edited, each with a short
//   section/heading hint of where the edit landed.
// - new_cards: repo-relative paths of brand-new cards the explorer created.
export const explorerMetaSchema = z.object({
patches: z.array(
z.object({
card: z.string(),
section: z.string(),
}),
),
new_cards: z.array(z.string()),
});
export type ExplorerMeta = z.infer<typeof explorerMetaSchema>;
export type CreateExplorerRoleDeps = {
extract: LlmExtractorConfig;
};
/**
 * Returns the meta of the most recent message with the given role, or
 * undefined when no message has that role. Scans from the end of the list.
 */
function lastMeta<M>(messages: WorkflowMessage[], role: string): M | undefined {
  const latest = [...messages].reverse().find((m) => m.role === role);
  return latest === undefined ? undefined : (latest.meta as M);
}
/**
 * Builds the explorer prompt: asks the agent to resolve the questions the
 * answerer marked found=false by reading the codebase and writing/patching
 * cards under `.knowledge/`, then running `nerve knowledge sync`.
 *
 * Reads the latest questioner/answerer meta from the thread steps; either may
 * be undefined when that role has not run yet (the template falls back to
 * "(unknown)" / an empty id list).
 */
export function explorerPrompt(ctx: ThreadContext): string {
const messages = ctx.steps as unknown as WorkflowMessage[];
const threadId = ctx.start.meta.threadId;
// Most recent meta from the questioner and answerer steps, if present.
const qm = lastMeta<QuestionerMeta>(messages, "questioner");
const am = lastMeta<AnswererMeta>(messages, "answerer");
const cwd = resolveWorkdir(ctx.start);
// Ids of questions the knowledge base could not answer (found=false).
const unanswered =
am?.results.filter((r) => !r.found).map((r) => r.id) ?? [];
return `You are the **explorer** in an extract-knowledge workflow.
## Context
- Thread: \`nerve thread ${threadId}\`
- Working directory (repo root for paths): ${cwd}
- Current knowledge card (questioner): ${qm?.card ?? "(unknown)"}
## Unanswered question ids
${JSON.stringify(unanswered)}
Use the prior answerer results in the thread to map ids to full question text when you read messages above.
## Task
For each unanswered question, **read the codebase** as needed, then either:
- Add a new markdown file under \`.knowledge/\`, or
- Patch an existing card (prefer updating the card listed above when appropriate).
After any write or patch to \`.knowledge\`, run:
\`\`\`bash
nerve knowledge sync
\`\`\`
from this repo root (${cwd}), and fix failures until sync succeeds.
## Output meta
Report \`patches\` as { card, section } entries for cards you edited (section is a short heading or path hint).
Report \`new_cards\` as repo-relative paths for brand-new files you created (e.g. \`.knowledge/new-topic.md\`).
Do not claim work you did not perform.`;
}
/** Wires the explorer prompt and meta schema into an LLM-backed role. */
export function createExplorerRole(
  adapter: AgentFn,
  { extract }: CreateExplorerRoleDeps,
): Role<ExplorerMeta> {
  const prompt = async (ctx: ThreadContext) => explorerPrompt(ctx);
  return createRole(adapter, prompt, explorerMetaSchema, extract);
}

View File

@ -1,108 +0,0 @@
import { readFile } from "node:fs/promises";
import { join } from "node:path";
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveQueueForQuestioner } from "../lib/knowledge-queue.js";
import { resolveWorkdir } from "../lib/workdir.js";
// Extraction schema for the questioner LLM output: exactly five questions,
// each with a stable string id, the question text, and a short domain label.
const questionerExtractSchema = z.object({
questions: z
.array(
z.object({
id: z.string(),
question: z.string(),
domain: z.string(),
}),
)
.length(5),
});
export type QuestionerMeta = {
/** Empty when no .knowledge cards and no work to do. */
card: string;
questions: { id: string; question: string; domain: string }[];
remaining_queue: string[];
};
export type CreateQuestionerRoleDeps = {
extract: LlmExtractorConfig;
};
/**
 * System half of the questioner prompt: instructs the LLM to propose exactly
 * five new technical questions that the shown card does not already answer.
 * NOTE(review): "tools reason" in the last rule looks like a dropped dash
 * ("tools — reason") — confirm against the intended prompt text.
 */
function questionerSystem(): string {
return `You are the **questioner** in an extract-knowledge workflow.
Read the given markdown knowledge card. Propose exactly **five** technical questions that are **not** already answered or covered by that card.
Rules:
- Questions must be concrete and technical.
- Each question needs a stable string id (e.g. q1, q2, q3, q4, q5), a short domain label (e.g. routing, storage), and the question text.
- Do not assume access to other files or tools reason only from the card content shown.`;
}
/** Renders the user half of the questioner prompt for one knowledge card. */
function questionerUser(card: string, cardBody: string, remainingHint: string[]): string {
  const parts = [
    `Current card path: ${card}`,
    `Remaining queue after this card (paths, may be empty): ${JSON.stringify(remainingHint)}`,
    "--- Card content ---",
    cardBody,
  ];
  return parts.join("\n");
}
/**
 * Assembles the full questioner prompt for the head card of the queue.
 *
 * @throws when the queue is empty (the wrapping role must short-circuit
 *         before the LLM is invoked) or the card file cannot be read.
 */
export async function questionerPrompt(ctx: ThreadContext): Promise<string> {
  const steps = ctx.steps as unknown as WorkflowMessage[];
  const cwd = resolveWorkdir(ctx.start);
  const queue = await resolveQueueForQuestioner(ctx.start, steps, cwd);
  if (queue.length === 0) {
    throw new Error(
      "questioner: prompt invoked with empty queue — wrapped role should short-circuit before LLM",
    );
  }
  const [head, ...rest] = queue;
  const card = head!;
  let cardBody: string;
  try {
    cardBody = await readFile(join(cwd, card), "utf8");
  } catch (e) {
    const reason = e instanceof Error ? e.message : String(e);
    throw new Error(`questioner: failed to read ${card}: ${reason}`);
  }
  return `${questionerSystem()}\n\n${questionerUser(card, cardBody, rest)}`;
}
/**
 * Builds the questioner role.
 *
 * Short-circuits with an empty meta when no knowledge cards are queued;
 * otherwise runs the LLM-backed role and attaches the current card and the
 * remaining queue to the extracted questions.
 */
export function createQuestionerRole(adapter: AgentFn, { extract }: CreateQuestionerRoleDeps): Role<QuestionerMeta> {
  const llmRole = createRole(adapter, questionerPrompt, questionerExtractSchema, extract);
  return async (ctx: ThreadContext) => {
    const steps = ctx.steps as unknown as WorkflowMessage[];
    const cwd = resolveWorkdir(ctx.start);
    const queue = await resolveQueueForQuestioner(ctx.start, steps, cwd);
    if (queue.length === 0) {
      return {
        content:
          "questioner: no `.knowledge` markdown files found and no seed path in the trigger prompt; queue is empty.",
        meta: { card: "", questions: [], remaining_queue: [] },
      };
    }
    const currentCard = queue[0]!;
    const rest = queue.slice(1);
    const result = await llmRole(ctx);
    // Re-wrap the LLM output, pinning the card that was asked about and the
    // queue left for subsequent rounds.
    return {
      content: result.content,
      meta: {
        card: currentCard,
        questions: result.meta.questions,
        remaining_queue: rest,
      },
    };
  };
}

1
workflows/solve-issue/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
dist/

View File

@ -1,42 +1,62 @@
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
import type { StartStep, WorkflowDefinition } from "@uncaged/nerve-core";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { moderator } from "./moderator.js";
import type { WorkflowMeta } from "./moderator.js";
import { createCommitterRole } from "./roles/committer.js";
import { createImplementRole } from "./roles/implement.js";
import { createPlanRole } from "./roles/plan.js";
import { createPrepareRole } from "./roles/prepare.js";
import { createPublishRole } from "./roles/publish.js";
import { createReadIssueRole } from "./roles/read-issue.js";
import { createReviewRole } from "./roles/review.js";
import { createTestRole } from "./roles/test.js";
import { buildImplementRole } from "./roles/implement/index.js";
import { buildPlanRole } from "./roles/plan/index.js";
import { prepareMetaSchema } from "./roles/prepare/index.js";
import { preparePrompt } from "./roles/prepare/prompt.js";
import { buildPublishRole } from "./roles/publish/index.js";
import { readIssueMetaSchema } from "./roles/read-issue/index.js";
import { readIssuePrompt } from "./roles/read-issue/prompt.js";
import { reviewMetaSchema } from "./roles/review/index.js";
import { reviewPrompt } from "./roles/review/prompt.js";
import { testMetaSchema } from "./roles/test/index.js";
import { testPrompt } from "./roles/test/prompt.js";
export type CreateSolveIssueDeps = {
defaultAdapter: AgentFn;
adapters?: Partial<Record<keyof WorkflowMeta, AgentFn>>;
export type BuildSolveIssueDeps = {
nerveRoot: string;
extract: LlmExtractorConfig;
};
export function createSolveIssueWorkflow({
defaultAdapter,
adapters,
export function buildSolveIssue({
nerveRoot,
extract,
}: CreateSolveIssueDeps): WorkflowDefinition<WorkflowMeta> {
const a = (role: keyof WorkflowMeta) => adapters?.[role] ?? defaultAdapter;
}: BuildSolveIssueDeps): WorkflowDefinition<WorkflowMeta> {
return {
name: "solve-issue",
roles: {
"read-issue": createReadIssueRole(a("read-issue"), extract),
prepare: createPrepareRole(a("prepare"), extract),
plan: createPlanRole(a("plan"), { extract, nerveRoot }),
implement: createImplementRole(a("implement"), { extract, nerveRoot }),
committer: createCommitterRole(a("committer"), extract),
review: createReviewRole(a("review"), extract, nerveRoot),
test: createTestRole(a("test"), extract),
publish: createPublishRole(a("publish"), { extract, nerveRoot }),
"read-issue": createRole(
hermesAdapter,
async (start: StartStep) => readIssuePrompt({ threadId: start.meta.threadId }),
readIssueMetaSchema,
extract,
),
prepare: createRole(
hermesAdapter,
async (start: StartStep) => preparePrompt({ threadId: start.meta.threadId }),
prepareMetaSchema,
extract,
),
plan: buildPlanRole({ extract, nerveRoot }),
implement: buildImplementRole({ extract, nerveRoot }),
review: createRole(
hermesAdapter,
async (start: StartStep) =>
reviewPrompt({ threadId: start.meta.threadId, nerveRoot }),
reviewMetaSchema,
extract,
),
test: createRole(
hermesAdapter,
async (start: StartStep) => testPrompt({ threadId: start.meta.threadId }),
testMetaSchema,
extract,
),
publish: buildPublishRole({ extract, nerveRoot }),
},
moderator,
};

View File

@ -1,7 +1,5 @@
import { join } from "node:path";
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import { createSolveIssueWorkflow } from "./build.js";
import { buildSolveIssue } from "./build.js";
import { resolveDashScopeProvider } from "./lib/provider.js";
const HOME = process.env.HOME ?? "/home/azureuser";
@ -13,25 +11,6 @@ if (provider === null) {
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or cfg get equivalents)");
}
const CURSOR_TIMEOUT_MS = 300_000;
const workflow = createSolveIssueWorkflow({
defaultAdapter: hermesAdapter,
adapters: {
plan: createCursorAdapter({
type: "cursor",
mode: "ask",
model: "auto",
timeout: CURSOR_TIMEOUT_MS,
}),
implement: createCursorAdapter({
type: "cursor",
model: "auto",
timeout: CURSOR_TIMEOUT_MS,
}),
},
nerveRoot: NERVE_ROOT,
extract: { provider },
});
const workflow = buildSolveIssue({ nerveRoot: NERVE_ROOT, extract: { provider } });
export default workflow;

View File

@ -1,5 +1,5 @@
import { join } from "node:path";
import type { RoleStep, WorkflowMessage } from "@uncaged/nerve-core";
import type { WorkflowMessage } from "@uncaged/nerve-core";
type SolveIssueParse = {
host: string;

View File

@ -1,20 +1,18 @@
import { END } from "@uncaged/nerve-core";
import type { Moderator } from "@uncaged/nerve-core";
import type { ReadIssueMeta } from "./roles/read-issue.js";
import type { PrepareMeta } from "./roles/prepare.js";
import type { PlanMeta } from "./roles/plan.js";
import type { ImplementMeta } from "./roles/implement.js";
import type { CommitterMeta } from "./roles/committer.js";
import type { ReviewMeta } from "./roles/review.js";
import type { TestMeta } from "./roles/test.js";
import type { PublishMeta } from "./roles/publish.js";
import type { ReadIssueMeta } from "./roles/read-issue/index.js";
import type { PrepareMeta } from "./roles/prepare/index.js";
import type { PlanMeta } from "./roles/plan/index.js";
import type { ImplementMeta } from "./roles/implement/index.js";
import type { ReviewMeta } from "./roles/review/index.js";
import type { TestMeta } from "./roles/test/index.js";
import type { PublishMeta } from "./roles/publish/index.js";
export type WorkflowMeta = {
"read-issue": ReadIssueMeta;
prepare: PrepareMeta;
plan: PlanMeta;
implement: ImplementMeta;
committer: CommitterMeta;
review: ReviewMeta;
test: TestMeta;
publish: PublishMeta;
@ -31,7 +29,6 @@ function totalRejections(steps: { role: string; meta: unknown }[]): number {
return steps.filter((s) => {
if (s.role === "review") return !(s.meta as Record<string, boolean>).approved;
if (s.role === "test") return !(s.meta as Record<string, boolean>).passed;
if (s.role === "committer") return !(s.meta as Record<string, boolean>).committed;
if (s.role === "publish") return !(s.meta as Record<string, boolean>).success;
return false;
}).length;
@ -62,13 +59,6 @@ export const moderator: Moderator<WorkflowMeta> = (context) => {
if (last.role === "implement") {
if (last.meta.done) {
return "committer";
}
return canRetryImplement(context.steps) ? "implement" : END;
}
if (last.role === "committer") {
if (last.meta.committed) {
return "review";
}
return canRetryImplement(context.steps) ? "implement" : END;

View File

@ -0,0 +1,21 @@
{
"name": "solve-issue-workflow",
"version": "0.0.1",
"private": true,
"type": "module",
"scripts": {
"build": "esbuild index.ts --bundle --platform=node --format=esm --outdir=dist --packages=external"
},
"dependencies": {
"@uncaged/nerve-adapter-cursor": "latest",
"@uncaged/nerve-adapter-hermes": "latest",
"@uncaged/nerve-core": "latest",
"@uncaged/nerve-workflow-utils": "latest",
"zod": "^4.3.6"
},
"devDependencies": {
"@types/node": "^22.0.0",
"esbuild": "^0.27.0",
"typescript": "^5.7.0"
}
}

View File

@ -1,57 +0,0 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, decorateRole, withDryRun, onFail } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Prompt for the committer agent: after a passing implement step it must
 * create a feature branch, commit the dirty working tree, and push it.
 * The trailing JSON line { "committed": … } is the meta routing signal.
 * @param threadId - workflow thread id the agent reads for context.
 */
function committerPrompt({ threadId }: { threadId: string }): string {
return `You are the committer agent. The **implement** step finished with a passing build; your job is to branch, commit, and push.
1. Read the workflow thread: \`nerve thread show ${threadId}\` — understand what was planned, implemented, and reviewed.
2. In the thread, locate \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\`. From them you need issue **number**, **title** (for the branch slug), repo **path**, and **defaultBranch**.
3. \`cd\` to the repo **path** from the markers. Optionally read \`CONVENTIONS.md\` in that repo root if present.
4. Run \`git rev-parse --abbrev-ref HEAD\` and compare with **defaultBranch** from the markers. Implement leaves changes uncommitted on the default branch — you should be on that branch with a dirty working tree. If you are not on the default branch, or the tree is clean when you expected changes, set **committed** to false and explain.
5. Run \`git status\`. If there is nothing to commit, set **committed** to false and explain.
6. Create a feature branch (do not commit directly on the default branch if it would mix unrelated work):
- Name: \`fix/<number>-<short-slug>\` for fixes, or \`feat/<number>-<short-slug>\` if the issue is clearly a feature.
- **slug**: lowercase, hyphens only, short (from issue title words).
- Example: \`git checkout -b fix/42-auth-timeout\`
7. \`git add -A\`
8. Write a **conventional commit** message describing what changed and why, using the thread context.
9. \`git commit -m "<message>"\` — do NOT pass \`--author\`, use repo git config.
10. \`git push -u origin <branch-name>\`
**committed=true** only if branch was created, commit succeeded, and **push** succeeded.
End your reply with a JSON line:
\`\`\`json
{ "committed": true }
\`\`\`
or
\`\`\`json
{ "committed": false }
\`\`\``;
}
// Routing signal for the committer role: a single boolean, per the workflow's
// one-boolean-per-role meta convention.
export const committerMetaSchema = z.object({
committed: z
.boolean()
.describe("true if branch created, changes committed, and pushed successfully"),
});
export type CommitterMeta = z.infer<typeof committerMetaSchema>;
/**
 * Committer role: LLM-backed branch/commit/push step. Decorated so a dry run
 * reports committed=true without side effects, and any failure maps to
 * committed=false instead of propagating.
 */
export function createCommitterRole(
  adapter: AgentFn,
  extract: LlmExtractorConfig,
): Role<CommitterMeta> {
  const promptFor = async (ctx: ThreadContext) =>
    committerPrompt({ threadId: ctx.start.meta.threadId });
  const base = createRole(adapter, promptFor, committerMetaSchema, extract);
  const decorators = [
    withDryRun({ label: "committer", meta: { committed: true } as CommitterMeta }),
    onFail({ label: "committer", meta: { committed: false } as CommitterMeta }),
  ];
  return decorateRole(base, decorators) as Role<CommitterMeta>;
}

View File

@ -1,86 +0,0 @@
import type { AgentFn, Role, RoleResult, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveRepoCwd } from "../lib/repo-context.js";
/**
 * Prompt for the implement agent: apply the planned code changes and get the
 * build green, while leaving all git branching/committing to the committer
 * step. The trailing JSON { "done": … } is the meta routing signal.
 * @param threadId - workflow thread id for context.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function buildImplementPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
return `You are the **implement** agent. You apply code changes for the issue.
Read workflow context (plan, reviewer/test feedback): \`nerve thread show ${threadId}\`
Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
Your cwd is the target repository.
## Requirements
1. Implement the planned changes; address reviewer/tester feedback from the thread if any.
2. Run the project **build** (\`pnpm build\`, \`npm run build\`, etc.) and fix issues until build passes.
3. Multi-step: if you cannot finish this round, explain why and set **done** to false.
Do **not** run \`git checkout -b\`, \`git add\`, \`git commit\`, or \`git push\`. **Never** create commits on any branch — branching and commits are handled by the **committer** step after you finish.
Then close with JSON:
\`\`\`json
{ "done": true }
\`\`\`
or \`{ "done": false }\` matching whether implementation is complete.
**done=true** only when changes are complete **and** build passes in this round.`;
}
// Routing signal for the implement role: done=true only when the changes are
// complete and the build passed in this round.
export const implementMetaSchema = z.object({
done: z.boolean().describe("true when changes are complete and build passes this round"),
});
export type ImplementMeta = z.infer<typeof implementMetaSchema>;
// Dependencies for building the implement role (extractor config + Nerve root).
export type CreateImplementRoleDeps = {
extract: LlmExtractorConfig;
nerveRoot: string;
};
/**
 * Implement role: runs the coding agent inside the repo resolved from the
 * thread markers. A missing repo context or a thrown agent error both resolve
 * to done=false rather than propagating out of the role.
 */
export function createImplementRole(
  adapter: AgentFn,
  { extract, nerveRoot }: CreateImplementRoleDeps,
): Role<ImplementMeta> {
  return async (ctx: ThreadContext): Promise<RoleResult<ImplementMeta>> => {
    const steps = ctx.steps as unknown as WorkflowMessage[];
    const repoCwd = resolveRepoCwd(steps);
    if (repoCwd === null) {
      return {
        content: "implement cannot run: missing repo path in thread markers",
        meta: { done: false },
      };
    }
    const agentRole = createRole(
      adapter,
      async (inner: ThreadContext) =>
        buildImplementPrompt({ threadId: inner.start.meta.threadId, nerveRoot }),
      implementMetaSchema,
      extract,
    );
    // Re-point the start step's workdir at the target repo before delegating.
    const patchedCtx: ThreadContext = {
      ...ctx,
      start: { ...ctx.start, meta: { ...ctx.start.meta, workdir: repoCwd } },
    };
    try {
      return await agentRole(patchedCtx);
    } catch (e) {
      const reason = e instanceof Error ? e.message : String(e);
      return { content: `implement failed: ${reason}`, meta: { done: false } };
    }
  };
}

View File

@ -0,0 +1,65 @@
import type { Role, RoleResult, StartStep, WorkflowMessage } from "@uncaged/nerve-core";
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveRepoCwd } from "../../lib/repo-context.js";
import { buildImplementPrompt } from "./prompt.js";
// Routing signal for the implement role: done=true only when the changes are
// complete and the build passed in this round.
export const implementMetaSchema = z.object({
done: z.boolean().describe("true when changes are complete and build passes this round"),
});
export type ImplementMeta = z.infer<typeof implementMetaSchema>;
// Dependencies for building the implement role (extractor config + Nerve root).
export type BuildImplementDeps = {
extract: LlmExtractorConfig;
nerveRoot: string;
};
// Hard timeout for the Cursor agent invocation (5 minutes).
const CURSOR_TIMEOUT_MS = 300_000;
/**
 * Implement role backed by a Cursor agent (agent mode, auto model). The repo
 * cwd comes from thread markers; a missing repo or a thrown error resolves to
 * done=false instead of propagating.
 */
export function buildImplementRole({
  extract,
  nerveRoot,
}: BuildImplementDeps): Role<ImplementMeta> {
  return async (start: StartStep, messages: WorkflowMessage[]): Promise<RoleResult<ImplementMeta>> => {
    const repoCwd = resolveRepoCwd(messages);
    if (repoCwd === null) {
      return {
        content: "implement cannot run: missing repo path in thread markers",
        meta: { done: false },
      };
    }
    const cursor = createCursorAdapter({
      type: "cursor",
      model: "auto",
      timeout: CURSOR_TIMEOUT_MS,
    });
    const agentRole = createRole(
      cursor,
      async (inner: StartStep) =>
        buildImplementPrompt({ threadId: inner.meta.threadId, nerveRoot }),
      implementMetaSchema,
      extract,
    );
    // Re-point the start step's workdir at the target repo before delegating.
    const patchedStart = { ...start, meta: { ...start.meta, workdir: repoCwd } } as StartStep;
    try {
      return await agentRole(patchedStart, messages);
    } catch (e) {
      const reason = e instanceof Error ? e.message : String(e);
      return { content: `implement failed: ${reason}`, meta: { done: false } };
    }
  };
}

View File

@ -0,0 +1,24 @@
/**
 * Prompt for the implement agent: create a branch, apply the planned changes,
 * and get the build green. The trailing JSON { "done": … } is the meta signal.
 * NOTE(review): unlike the pre-refactor prompt, this variant has the agent
 * create the branch itself (there is no separate committer step here).
 * @param threadId - workflow thread id for context.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
export function buildImplementPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
return `You are the **implement** agent. You apply code changes for the issue.
Read workflow context (plan, reviewer/test feedback): \`nerve thread show ${threadId}\`
Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
Your cwd is the target repository.
## Requirements
1. Create a branch: \`fix/issue-<number>-<short-slug>\` (use \`feat/\` if the issue is clearly a feature). Use a slug from the issue title (lowercase, hyphens).
2. Implement the planned changes; address reviewer/tester feedback from the thread if any.
3. Run the project **build** (\`pnpm build\`, \`npm run build\`, etc.) and fix issues until build passes.
4. Multi-step: if you cannot finish this round, explain why and set **done** to false.
Then close with JSON:
\`\`\`json
{ "done": true }
\`\`\`
or \`{ "done": false }\` matching whether implementation is complete.
**done=true** only when changes are complete **and** build passes in this round.`;
}

View File

@ -1,88 +0,0 @@
import type { AgentFn, Role, RoleResult, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveRepoCwd } from "../lib/repo-context.js";
/**
 * Prompt for the plan agent (analysis-only "ask" mode): produce a markdown
 * implementation plan with explicit test commands. The trailing JSON
 * { "ready": … } is the meta routing signal.
 * @param threadId - workflow thread id for context.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function buildPlanPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
return `You are the **plan** agent (analysis only — ask mode). You produce an implementation plan for fixing the issue.
Read workflow context: \`nerve thread show ${threadId}\`
Read Nerve workspace conventions (coding rules for agents): \`cat ${nerveRoot}/CONVENTIONS.md\`
In the **target repository** (your cwd), skim relevant files and read \`CONVENTIONS.md\` **if it exists** there.
## Output
Write an implementation plan in **markdown** with:
1. Problem understanding
2. Change strategy
3. Target files (paths)
4. **Test commands** to run (explicit shell commands, e.g. \`pnpm test\`, \`pnpm vitest run\`)
5. Risks
End your reply with a JSON code block (meta signal):
\`\`\`json
{ "ready": true }
\`\`\`
Use \`{ "ready": false }\` if the plan cannot be made actionable.
**ready=true** only when the plan is clear and actionable.`;
}
// Routing signal for the plan role: ready=true only for an actionable plan.
export const planMetaSchema = z.object({
ready: z.boolean().describe("true if plan is clear and actionable"),
});
export type PlanMeta = z.infer<typeof planMetaSchema>;
// Dependencies for building the plan role (extractor config + Nerve root).
export type CreatePlanRoleDeps = {
extract: LlmExtractorConfig;
nerveRoot: string;
};
/**
 * Plan role: runs the planning agent inside the repo resolved from the thread
 * markers. A missing repo context or a thrown agent error both resolve to
 * ready=false rather than propagating.
 */
export function createPlanRole(
  adapter: AgentFn,
  { extract, nerveRoot }: CreatePlanRoleDeps,
): Role<PlanMeta> {
  return async (ctx: ThreadContext): Promise<RoleResult<PlanMeta>> => {
    const steps = ctx.steps as unknown as WorkflowMessage[];
    const repoCwd = resolveRepoCwd(steps);
    if (repoCwd === null) {
      return {
        content: "plan cannot run: missing ---SOLVE_ISSUE_REPO--- or ---SOLVE_ISSUE_PARSE--- in thread",
        meta: { ready: false },
      };
    }
    const agentRole = createRole(
      adapter,
      async (inner: ThreadContext) =>
        buildPlanPrompt({ threadId: inner.start.meta.threadId, nerveRoot }),
      planMetaSchema,
      extract,
    );
    // Re-point the start step's workdir at the target repo before delegating.
    const patchedCtx: ThreadContext = {
      ...ctx,
      start: { ...ctx.start, meta: { ...ctx.start.meta, workdir: repoCwd } },
    };
    try {
      return await agentRole(patchedCtx);
    } catch (e) {
      const reason = e instanceof Error ? e.message : String(e);
      return { content: `plan failed: ${reason}`, meta: { ready: false } };
    }
  };
}

View File

@ -0,0 +1,63 @@
import type { Role, RoleResult, StartStep, WorkflowMessage } from "@uncaged/nerve-core";
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveRepoCwd } from "../../lib/repo-context.js";
import { buildPlanPrompt } from "./prompt.js";
// Routing signal for the plan role: ready=true only for an actionable plan.
export const planMetaSchema = z.object({
ready: z.boolean().describe("true if plan is clear and actionable"),
});
export type PlanMeta = z.infer<typeof planMetaSchema>;
// Dependencies for building the plan role (extractor config + Nerve root).
export type BuildPlanDeps = {
extract: LlmExtractorConfig;
nerveRoot: string;
};
// Hard timeout for the Cursor agent invocation (5 minutes).
const CURSOR_TIMEOUT_MS = 300_000;
/**
 * Plan role backed by a Cursor agent in read-only "ask" mode. The repo cwd is
 * resolved from thread markers; a missing repo or a thrown error resolves to
 * ready=false instead of propagating.
 */
export function buildPlanRole({ extract, nerveRoot }: BuildPlanDeps): Role<PlanMeta> {
  return async (start: StartStep, messages: WorkflowMessage[]): Promise<RoleResult<PlanMeta>> => {
    const repoCwd = resolveRepoCwd(messages);
    if (repoCwd === null) {
      return {
        content: "plan cannot run: missing ---SOLVE_ISSUE_REPO--- or ---SOLVE_ISSUE_PARSE--- in thread",
        meta: { ready: false },
      };
    }
    const cursor = createCursorAdapter({
      type: "cursor",
      mode: "ask",
      model: "auto",
      timeout: CURSOR_TIMEOUT_MS,
    });
    const agentRole = createRole(
      cursor,
      async (inner: StartStep) =>
        buildPlanPrompt({ threadId: inner.meta.threadId, nerveRoot }),
      planMetaSchema,
      extract,
    );
    // Re-point the start step's workdir at the target repo before delegating.
    const patchedStart = { ...start, meta: { ...start.meta, workdir: repoCwd } } as StartStep;
    try {
      return await agentRole(patchedStart, messages);
    } catch (e) {
      const reason = e instanceof Error ? e.message : String(e);
      return { content: `plan failed: ${reason}`, meta: { ready: false } };
    }
  };
}

View File

@ -0,0 +1,27 @@
/**
 * Prompt for the plan agent (analysis-only "ask" mode): produce a markdown
 * implementation plan with explicit test commands. The trailing JSON
 * { "ready": … } is the meta routing signal.
 * @param threadId - workflow thread id for context.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
export function buildPlanPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
return `You are the **plan** agent (analysis only — ask mode). You produce an implementation plan for fixing the issue.
Read workflow context: \`nerve thread show ${threadId}\`
Read Nerve workspace conventions (coding rules for agents): \`cat ${nerveRoot}/CONVENTIONS.md\`
In the **target repository** (your cwd), skim relevant files and read \`CONVENTIONS.md\` **if it exists** there.
## Output
Write an implementation plan in **markdown** with:
1. Problem understanding
2. Change strategy
3. Target files (paths)
4. **Test commands** to run (explicit shell commands, e.g. \`pnpm test\`, \`pnpm vitest run\`)
5. Risks
End your reply with a JSON code block (meta signal):
\`\`\`json
{ "ready": true }
\`\`\`
Use \`{ "ready": false }\` if the plan cannot be made actionable.
**ready=true** only when the plan is clear and actionable.`;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Routing signal for the prepare role: ready=true only when the repo exists,
// is clean, and the baseline install/build succeeded.
export const prepareMetaSchema = z.object({
ready: z.boolean().describe("true if repo is ready and baseline build ok"),
});
export type PrepareMeta = z.infer<typeof prepareMetaSchema>;

View File

@ -1,9 +1,4 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
function preparePrompt({ threadId }: { threadId: string }): string {
export function preparePrompt({ threadId }: { threadId: string }): string {
return `You are the **prepare** agent. You ensure the target repository is ready for work.
Read prior messages / thread for issue markers: \`nerve thread show ${threadId}\`
@ -57,17 +52,3 @@ or \`{ "ready": false }\` if the repo is invalid, or install/build baseline fail
**ready=true** only when the repo exists at \`path\`, is clean, dependencies installed, and baseline build succeeded (or no build script).`;
}
export const prepareMetaSchema = z.object({
ready: z.boolean().describe("true if repo is ready and baseline build ok"),
});
export type PrepareMeta = z.infer<typeof prepareMetaSchema>;
export function createPrepareRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<PrepareMeta> {
return createRole(
adapter,
async (ctx: ThreadContext) => preparePrompt({ threadId: ctx.start.meta.threadId }),
prepareMetaSchema,
extract,
);
}

View File

@ -1,110 +0,0 @@
import { mkdirSync, writeFileSync } from "node:fs";
import { join } from "node:path";
import type { AgentFn, Role, RoleResult, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, isDryRun } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
/**
 * Prompt for the publish agent: push the current branch if needed and open a
 * pull request via the tea CLI, with a structured What/Why/Changes/Ref body.
 * The trailing JSON { "success": … } is the meta routing signal.
 * @param threadId - workflow thread id for context.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function buildPublishPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
return `You are the **publish** agent (Hermes). Test has passed. Open a pull request for the current branch using the **tea** CLI.
## Context
- Read the full workflow thread: \`nerve thread show ${threadId}\`
- Nerve workspace conventions (for tone/consistency, optional): \`cat ${nerveRoot}/CONVENTIONS.md\`
## Repo and issue (from the thread)
Find \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\` in prior messages. You need:
- \`path\` — clone checkout directory (this is your working copy)
- \`host\`, \`owner\`, \`repo\`, \`number\` for the issue
- \`defaultBranch\` (for PR base) from SOLVE_ISSUE_REPO
**Issue link** for the Ref section: \`https://<host>/<owner>/<repo>/issues/<number>\`
## Steps (in order)
1. \`cd\` to the **repo \`path\`**. Run \`git rev-parse --abbrev-ref HEAD\` to get the current branch name. The **committer** step should already have pushed this branch; run \`git push -u origin <that-branch>\` only if the branch is not yet on the remote.
2. Choose a **PR title** that reflects the real change (not a generic \`fix: issue #N\`): derive it from the issue title, plan, and thread summary (keep it concise; Conventional Commits style is fine, e.g. \`fix(auth): handle session expiry\`).
3. Write a **PR body** in Markdown with exactly these sections, in this order, each with a \`##\` heading (fill with concise content based on the thread: plan, implement, review, test):
- **## What** one short paragraph: what this PR does
- **## Why** one short paragraph: motivation / issue
- **## Changes** bullet list of notable changes
- **## Ref** include one line \`Fixes #<number>\` (same \`number\` from SOLVE_ISSUE_PARSE; closes/links the issue where supported) **and** the issue URL \`https://<host>/<owner>/<repo>/issues/<number>\`
4. Create the PR with **tea** (not curl/fetch to Gitea):
- \`tea pr create --repo <owner>/<repo> --base <defaultBranch> --head <branch> --title "<your meaningful title>" --body <your markdown body>\`
- You may use a heredoc or a temp file for \`--body\` if the shell requires it; keep the four sections in the body.
5. Confirm the PR was created (tea prints a URL or PR number in typical setups).
**success=true** only if both **push** and **tea** PR creation succeed. If any step fails, set **success=false** and say why.
End your reply with a JSON line:
\`\`\`json
{ "success": true }
\`\`\`
or
\`\`\`json
{ "success": false }
\`\`\``;
}
// Routing signal for the publish role: success=true only when both the push
// and the tea PR creation succeeded.
export const publishMetaSchema = z.object({
success: z.boolean().describe("true if git push and tea pr create both succeeded"),
});
export type PublishMeta = z.infer<typeof publishMetaSchema>;
// Dependencies for building the publish role (extractor config + Nerve root).
export type CreatePublishRoleDeps = {
extract: LlmExtractorConfig;
nerveRoot: string;
};
/** Absolute path of a fresh, timestamped publish log under <nerveRoot>/logs. */
function logPath(nerveRoot: string): string {
  const fileName = `solve-issue-publish-${Date.now()}.log`;
  return join(nerveRoot, "logs", fileName);
}
/**
 * Publish role: opens a PR via the agent. A dry run short-circuits with
 * success=true (no push, no PR); any thrown error is written to a log file
 * and reported as success=false instead of propagating.
 */
export function createPublishRole(
  adapter: AgentFn,
  { extract, nerveRoot }: CreatePublishRoleDeps,
): Role<PublishMeta> {
  const agentRole = createRole(
    adapter,
    async (ctx: ThreadContext) =>
      buildPublishPrompt({ threadId: ctx.start.meta.threadId, nerveRoot }),
    publishMetaSchema,
    extract,
  );
  return async (ctx: ThreadContext): Promise<RoleResult<PublishMeta>> => {
    const file = logPath(nerveRoot);
    // Ensure the logs directory exists before any write.
    mkdirSync(join(file, ".."), { recursive: true });
    if (isDryRun(ctx.start)) {
      const note = "[dry-run] publish skipped (no git push / PR)";
      writeFileSync(file, `${note}\n`, "utf-8");
      return {
        content: `[dry-run] publish skipped — log: ${file}`,
        meta: { success: true },
      };
    }
    // Run the agent from the Nerve workspace root.
    const patchedCtx: ThreadContext = {
      ...ctx,
      start: { ...ctx.start, meta: { ...ctx.start.meta, workdir: nerveRoot } },
    };
    try {
      return await agentRole(patchedCtx);
    } catch (e) {
      const reason = e instanceof Error ? e.message : String(e);
      writeFileSync(file, `publish failed: ${reason}\n`, "utf-8");
      return {
        content: `publish failed: ${reason}\nLog: ${file}`,
        meta: { success: false },
      };
    }
  };
}

View File

@ -0,0 +1,64 @@
import { mkdirSync, writeFileSync } from "node:fs";
import { join } from "node:path";
import type { Role, RoleResult, StartStep, WorkflowMessage } from "@uncaged/nerve-core";
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole, isDryRun } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { buildPublishPrompt } from "./prompt.js";
// Routing meta for the publish role — one boolean, per the workspace Meta convention.
export const publishMetaSchema = z.object({
  success: z.boolean().describe("true if git push and tea pr create both succeeded"),
});
// Meta shape consumed by workflow routing after the publish step.
export type PublishMeta = z.infer<typeof publishMetaSchema>;
// Dependencies injected into buildPublishRole.
export type BuildPublishDeps = {
  extract: LlmExtractorConfig; // extractor config used to pull the trailing JSON meta line from the agent reply
  nerveRoot: string; // nerve workspace root; log files are written under <nerveRoot>/logs (see logPath)
};
// Unique log file path: <nerveRoot>/logs/solve-issue-publish-<epoch-ms>.log
function logPath(nerveRoot: string): string {
  const stamp = Date.now();
  return join(nerveRoot, "logs", `solve-issue-publish-${stamp}.log`);
}
/**
 * Builds the Hermes-backed publish role for the solve-issue workflow: pushes
 * the current branch and opens a PR via the `tea` CLI.
 *
 * Behavior:
 * - dry-run: performs no git/PR side effects, writes a skip note to the log
 *   file, and reports `success: true`.
 * - normal: delegates to the inner role with `workdir` pinned to `nerveRoot`;
 *   a thrown error is logged to a file and surfaced as `success: false`.
 *
 * @param deps - extractor config and nerve workspace root
 * @returns a Role producing `{ success: boolean }` routing meta
 */
export function buildPublishRole({ extract, nerveRoot }: BuildPublishDeps): Role<PublishMeta> {
  // Hermes-backed inner role; the prompt is rebuilt per run from the thread id.
  const delegate = createRole(
    hermesAdapter,
    async (start: StartStep) =>
      buildPublishPrompt({ threadId: start.meta.threadId, nerveRoot }),
    publishMetaSchema,
    extract,
  );

  return async (start: StartStep, messages: WorkflowMessage[]): Promise<RoleResult<PublishMeta>> => {
    const logFile = logPath(nerveRoot);
    // Same directory that join(logFile, "..") resolves to — ensure <nerveRoot>/logs exists.
    mkdirSync(join(nerveRoot, "logs"), { recursive: true });

    // Dry-run: never push or open a PR; record the skip and succeed.
    if (isDryRun(start)) {
      const note = "[dry-run] publish skipped (no git push / PR)";
      writeFileSync(logFile, `${note}\n`, "utf-8");
      return {
        content: `[dry-run] publish skipped — log: ${logFile}`,
        meta: { success: true },
      };
    }

    // Pin the agent's working directory to the nerve workspace root.
    // NOTE(review): the `as StartStep` assumes meta tolerates an extra
    // `workdir` key — confirm against StartStep's meta type.
    const scopedStart = {
      ...start,
      meta: { ...start.meta, workdir: nerveRoot },
    } as StartStep;

    try {
      return await delegate(scopedStart, messages);
    } catch (e) {
      const reason = e instanceof Error ? e.message : String(e);
      writeFileSync(logFile, `publish failed: ${reason}\n`, "utf-8");
      return {
        content: `publish failed: ${reason}\nLog: ${logFile}`,
        meta: { success: false },
      };
    }
  };
}

View File

@ -0,0 +1,42 @@
/**
 * Builds the prompt for the Hermes "publish" agent: push the current branch,
 * then open a PR with the `tea` CLI, and end the reply with a single
 * `{ "success": boolean }` JSON line for meta extraction.
 *
 * @param threadId - workflow thread id the agent reads for repo/issue markers
 * @param nerveRoot - nerve workspace root (used to point at CONVENTIONS.md)
 * @returns the full prompt string handed to the agent
 */
export function buildPublishPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are the **publish** agent (Hermes). Test has passed. Open a pull request for the current branch using the **tea** CLI.
## Context
- Read the full workflow thread: \`nerve thread show ${threadId}\`
- Nerve workspace conventions (for tone/consistency, optional): \`cat ${nerveRoot}/CONVENTIONS.md\`
## Repo and issue (from the thread)
Find \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\` in prior messages. You need:
- \`path\` — clone checkout directory (this is your working copy)
- \`host\`, \`owner\`, \`repo\`, \`number\` for the issue
- \`defaultBranch\` (for PR base) from SOLVE_ISSUE_REPO
**Issue link** for the Ref section: \`https://<host>/<owner>/<repo>/issues/<number>\`
## Steps (in order)
1. \`cd\` to the **repo \`path\`**. Run \`git rev-parse --abbrev-ref HEAD\` to get the current branch name.
2. \`git push -u origin <that-branch>\` (must succeed before PR).
3. Write a **PR body** in Markdown with exactly these sections, in this order, each with a \`##\` heading (fill with concise content based on the thread: plan, implement, review, test):
- **## What** one short paragraph: what this PR does
- **## Why** one short paragraph: motivation / issue
- **## Changes** bullet list of notable changes
- **## Ref** the issue link above
4. Create the PR with **tea** (not curl/fetch to Gitea):
- \`tea pr create --repo <owner>/<repo> --base <defaultBranch> --head <branch> --title "fix: issue #<number>" --body <your markdown body>\`
- You may use a heredoc or a temp file for \`--body\` if the shell requires it; keep the four sections in the body.
5. Confirm the PR was created (tea prints a URL or PR number in typical setups).
**success=true** only if both **push** and **tea** PR creation succeed. If any step fails, set **success=false** and say why.
End your reply with a JSON line:
\`\`\`json
{ "success": true }
\`\`\`
or
\`\`\`json
{ "success": false }
\`\`\``;
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Routing meta for the read-issue role — one boolean, per the workspace Meta convention.
export const readIssueMetaSchema = z.object({
  ready: z.boolean().describe("true if issue content was fetched and markers are present"),
});
// Meta shape consumed by workflow routing after the read-issue step.
export type ReadIssueMeta = z.infer<typeof readIssueMetaSchema>;

View File

@ -1,9 +1,4 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
function readIssuePrompt({ threadId }: { threadId: string }): string {
export function readIssuePrompt({ threadId }: { threadId: string }): string {
return `You are the **read-issue** agent. You fetch Gitea issue content via the \`tea\` CLI.
Read the workflow thread start prompt for the issue URL (same run): \`nerve thread show ${threadId}\`
@ -37,17 +32,3 @@ Use \`{ "ready": false }\` if you could not fetch or parse the issue.
**ready=true** only if the issue was fetched successfully and the marker block is correct.`;
}
export const readIssueMetaSchema = z.object({
ready: z.boolean().describe("true if issue content was fetched and markers are present"),
});
export type ReadIssueMeta = z.infer<typeof readIssueMetaSchema>;
export function createReadIssueRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<ReadIssueMeta> {
return createRole(
adapter,
async (ctx: ThreadContext) => readIssuePrompt({ threadId: ctx.start.meta.threadId }),
readIssueMetaSchema,
extract,
);
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Routing meta for the reviewer role — one boolean, per the workspace Meta convention.
export const reviewMetaSchema = z.object({
  approved: z.boolean().describe("true if diff is clean and ready for tests"),
});
// Meta shape consumed by workflow routing after the review step.
export type ReviewMeta = z.infer<typeof reviewMetaSchema>;

View File

@ -1,9 +1,4 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
function reviewPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
export function reviewPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
return `You are a **code reviewer** (Hermes). You run after implement and before test.
Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`
@ -38,22 +33,3 @@ or
{ "approved": false }
\`\`\``;
}
export const reviewMetaSchema = z.object({
approved: z.boolean().describe("true if diff is clean and ready for tests"),
});
export type ReviewMeta = z.infer<typeof reviewMetaSchema>;
export function createReviewRole(
adapter: AgentFn,
extract: LlmExtractorConfig,
nerveRoot: string,
): Role<ReviewMeta> {
return createRole(
adapter,
async (ctx: ThreadContext) =>
reviewPrompt({ threadId: ctx.start.meta.threadId, nerveRoot }),
reviewMetaSchema,
extract,
);
}

View File

@ -0,0 +1,6 @@
import { z } from "zod";
// Routing meta for the tester role — one boolean, per the workspace Meta convention.
export const testMetaSchema = z.object({
  passed: z.boolean().describe("true if all test commands passed"),
});
// Meta shape consumed by workflow routing after the test step.
export type TestMeta = z.infer<typeof testMetaSchema>;

View File

@ -1,9 +1,4 @@
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
function testPrompt({ threadId }: { threadId: string }): string {
export function testPrompt({ threadId }: { threadId: string }): string {
return `You are the **test** agent (Hermes). You execute automated tests for the change.
Read workflow context: \`nerve thread show ${threadId}\`
@ -24,17 +19,3 @@ or \`{ "passed": false }\`
**passed=true** only if every executed command exited 0 (or skip was justified with no failing command).`;
}
export const testMetaSchema = z.object({
passed: z.boolean().describe("true if all test commands passed"),
});
export type TestMeta = z.infer<typeof testMetaSchema>;
export function createTestRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<TestMeta> {
return createRole(
adapter,
async (ctx: ThreadContext) => testPrompt({ threadId: ctx.start.meta.threadId }),
testMetaSchema,
extract,
);
}

View File

@ -0,0 +1,13 @@
{
"compilerOptions": {
"target": "ES2022",
"lib": ["ES2022"],
"module": "NodeNext",
"moduleResolution": "NodeNext",
"strict": true,
"skipLibCheck": true,
"noEmit": true,
"types": ["node"]
},
"include": ["./**/*.ts"]
}