fix: add dryRun early-return for all roles in workflow-generator
llmExtract returns `{} as T` in dryRun mode, so downstream code calling `.map()` on an expected array field crashed on undefined.
Each role now returns mock data immediately when dryRun is true.
小橘 🍊(NEKO Team)
This commit is contained in:
parent
7bfb24c2c1
commit
9f2067db7d
23
biome.json
Normal file
23
biome.json
Normal file
@ -0,0 +1,23 @@
|
||||
{
|
||||
"$schema": "https://biomejs.dev/schemas/1.9.0/schema.json",
|
||||
"formatter": {
|
||||
"indentStyle": "space",
|
||||
"indentWidth": 2,
|
||||
"lineWidth": 100
|
||||
},
|
||||
"javascript": {
|
||||
"formatter": {
|
||||
"quoteStyle": "double",
|
||||
"semicolons": "always"
|
||||
}
|
||||
},
|
||||
"linter": {
|
||||
"enabled": true,
|
||||
"rules": {
|
||||
"recommended": true,
|
||||
"suspicious": {
|
||||
"noConsole": "error"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -6,7 +6,9 @@
|
||||
"dependencies": {
|
||||
"@uncaged/nerve-core": "latest",
|
||||
"@uncaged/nerve-daemon": "latest",
|
||||
"drizzle-orm": "latest"
|
||||
"@uncaged/nerve-workflow-utils": "latest",
|
||||
"drizzle-orm": "latest",
|
||||
"zod": "^4.3.6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"drizzle-kit": "latest"
|
||||
@ -17,7 +19,8 @@
|
||||
],
|
||||
"overrides": {
|
||||
"@uncaged/nerve-daemon": "link:../repos/nerve/packages/daemon",
|
||||
"@uncaged/nerve-core": "link:../repos/nerve/packages/core"
|
||||
"@uncaged/nerve-core": "link:../repos/nerve/packages/core",
|
||||
"@uncaged/nerve-workflow-utils": "link:../repos/nerve/packages/workflow-utils"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
12
pnpm-lock.yaml
generated
12
pnpm-lock.yaml
generated
@ -7,6 +7,7 @@ settings:
|
||||
overrides:
|
||||
'@uncaged/nerve-daemon': link:../repos/nerve/packages/daemon
|
||||
'@uncaged/nerve-core': link:../repos/nerve/packages/core
|
||||
'@uncaged/nerve-workflow-utils': link:../repos/nerve/packages/workflow-utils
|
||||
|
||||
importers:
|
||||
|
||||
@ -18,9 +19,15 @@ importers:
|
||||
'@uncaged/nerve-daemon':
|
||||
specifier: link:../repos/nerve/packages/daemon
|
||||
version: link:../repos/nerve/packages/daemon
|
||||
'@uncaged/nerve-workflow-utils':
|
||||
specifier: link:../repos/nerve/packages/workflow-utils
|
||||
version: link:../repos/nerve/packages/workflow-utils
|
||||
drizzle-orm:
|
||||
specifier: latest
|
||||
version: 0.45.2(better-sqlite3@11.10.0)
|
||||
zod:
|
||||
specifier: ^4.3.6
|
||||
version: 4.3.6
|
||||
devDependencies:
|
||||
drizzle-kit:
|
||||
specifier: latest
|
||||
@ -749,6 +756,9 @@ packages:
|
||||
wrappy@1.0.2:
|
||||
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
|
||||
|
||||
zod@4.3.6:
|
||||
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
|
||||
|
||||
snapshots:
|
||||
|
||||
'@drizzle-team/brocli@0.10.2': {}
|
||||
@ -1281,3 +1291,5 @@ snapshots:
|
||||
|
||||
wrappy@1.0.2:
|
||||
optional: true
|
||||
|
||||
zod@4.3.6: {}
|
||||
|
||||
121
senses/hermes-session-message-stats/index.js
Normal file
121
senses/hermes-session-message-stats/index.js
Normal file
@ -0,0 +1,121 @@
|
||||
import { createReadStream } from "node:fs";
|
||||
import { readdir } from "node:fs/promises";
|
||||
import { homedir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { createInterface } from "node:readline";
|
||||
import { hermesSessionMessageStats } from "./schema.ts";
|
||||
|
||||
const MEASUREMENT_WINDOW_MS = 900_000;
|
||||
const MEASUREMENT_WINDOW_SECONDS = 900;
|
||||
|
||||
/**
 * Count user/assistant/tool messages in one JSONL session file whose
 * timestamps fall inside the [cutoffMs, nowMs] window.
 *
 * Lines that are blank, not valid JSON, missing a string role/timestamp,
 * or outside the window are skipped silently.
 *
 * @param {string} filePath
 * @param {number} cutoffMs
 * @param {number} nowMs
 * @returns {Promise<{ user: number; assistant: number; tool: number; fileHadActivity: boolean }>}
 */
async function aggregateJsonlFile(filePath, cutoffMs, nowMs) {
  const counts = { user: 0, assistant: 0, tool: 0 };
  let sawActivity = false;

  const stream = createReadStream(filePath, { encoding: "utf8" });
  const reader = createInterface({ input: stream, crlfDelay: Infinity });
  try {
    for await (const rawLine of reader) {
      const text = rawLine.trim();
      if (text === "") continue;

      let record;
      try {
        record = JSON.parse(text);
      } catch {
        continue; // malformed JSON line — ignore
      }

      if (typeof record.role !== "string" || typeof record.timestamp !== "string") {
        continue;
      }

      const when = Date.parse(record.timestamp);
      if (!Number.isFinite(when) || when < cutoffMs || when > nowMs) continue;

      // Roles are normalized (trim + lowercase) before matching.
      const role = record.role.trim().toLowerCase();
      if (role === "user" || role === "assistant" || role === "tool") {
        counts[role] += 1;
        sawActivity = true;
      }
    }
  } finally {
    reader.close();
  }

  return {
    user: counts.user,
    assistant: counts.assistant,
    tool: counts.tool,
    fileHadActivity: sawActivity,
  };
}
|
||||
|
||||
/**
 * Sense entry point: aggregate Hermes session message counts over the
 * trailing measurement window, persist one stats row, and return it.
 *
 * A missing ~/.hermes/sessions directory is treated as "no sessions";
 * any other readdir failure propagates.
 *
 * @param db drizzle database handle used for the insert
 * @param _peers unused peer argument
 */
export async function compute(db, _peers) {
  const nowMs = Date.now();
  const cutoffMs = nowMs - MEASUREMENT_WINDOW_MS;
  const ts = nowMs;

  const totals = { user: 0, assistant: 0, tool: 0 };
  let activeSessions = 0;

  // Enumerate *.jsonl session files under the sessions directory.
  const sessionsDir = join(homedir(), ".hermes", "sessions");
  let files = [];
  try {
    const entries = await readdir(sessionsDir, { withFileTypes: true });
    files = entries
      .filter((e) => e.isFile() && e.name.endsWith(".jsonl"))
      .map((e) => join(sessionsDir, e.name));
  } catch (err) {
    const missingDir =
      err && typeof err === "object" && "code" in err && err.code === "ENOENT";
    if (!missingDir) throw err;
  }

  for (const filePath of files) {
    const stats = await aggregateJsonlFile(filePath, cutoffMs, nowMs);
    totals.user += stats.user;
    totals.assistant += stats.assistant;
    totals.tool += stats.tool;
    if (stats.fileHadActivity) activeSessions += 1;
  }

  const row = {
    ts,
    totalUserMessages: totals.user,
    totalAssistantMessages: totals.assistant,
    totalToolMessages: totals.tool,
    totalMessages: totals.user + totals.assistant + totals.tool,
    activeSessions,
    measurementWindowSeconds: MEASUREMENT_WINDOW_SECONDS,
  };

  await db.insert(hermesSessionMessageStats).values(row);

  // Return a fresh copy mirroring exactly what was persisted.
  return { ...row };
}
|
||||
13
senses/hermes-session-message-stats/migrations/0001_init.sql
Normal file
13
senses/hermes-session-message-stats/migrations/0001_init.sql
Normal file
@ -0,0 +1,13 @@
|
||||
-- Migration: 0001_init
-- Creates the hermes_session_message_stats table for hermes-session-message-stats sense.
-- One row is appended per compute() run; columns mirror schema.ts.

CREATE TABLE IF NOT EXISTS hermes_session_message_stats (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  ts INTEGER NOT NULL,                          -- sample time (Date.now() in compute, epoch ms)
  total_user_messages INTEGER NOT NULL,
  total_assistant_messages INTEGER NOT NULL,
  total_tool_messages INTEGER NOT NULL,
  total_messages INTEGER NOT NULL,              -- sum of the three role counts
  active_sessions INTEGER NOT NULL,             -- session files with >=1 in-window message
  measurement_window_seconds INTEGER NOT NULL   -- window length in seconds (900)
);
|
||||
12
senses/hermes-session-message-stats/schema.ts
Normal file
12
senses/hermes-session-message-stats/schema.ts
Normal file
@ -0,0 +1,12 @@
|
||||
import { integer, sqliteTable } from "drizzle-orm/sqlite-core";
|
||||
|
||||
/**
 * Drizzle schema for per-window Hermes session message statistics.
 * Mirrors migrations/0001_init.sql; one row is inserted per compute() run.
 */
export const hermesSessionMessageStats = sqliteTable("hermes_session_message_stats", {
  id: integer("id").primaryKey({ autoIncrement: true }),
  // Sample timestamp — epoch milliseconds (compute() stores Date.now()).
  ts: integer("ts").notNull(),
  totalUserMessages: integer("total_user_messages").notNull(),
  totalAssistantMessages: integer("total_assistant_messages").notNull(),
  totalToolMessages: integer("total_tool_messages").notNull(),
  // Sum of the three role counts above.
  totalMessages: integer("total_messages").notNull(),
  // Number of session files with at least one in-window message.
  activeSessions: integer("active_sessions").notNull(),
  // Measurement window length in seconds (currently 900).
  measurementWindowSeconds: integer("measurement_window_seconds").notNull(),
});
|
||||
575
workflows/pr-summarizer/index.ts
Normal file
575
workflows/pr-summarizer/index.ts
Normal file
@ -0,0 +1,575 @@
|
||||
/**
|
||||
* PR 摘要工作流:从 Gitea 拉取 PR 与 diff,可选 LLM 分析后输出中文 Markdown 总结。
|
||||
* 宿主需在 nerve.yaml 中注册 workflows.pr-summarizer;触发示例:
|
||||
* nerve workflow trigger pr-summarizer --payload '{"prompt":"<PR URL 或 JSON>"}'
|
||||
* Sense 可返回 workflow: `pr-summarizer|50|<prompt>`(见 parseSenseWorkflowDirective)。
|
||||
*/
|
||||
import type {
|
||||
ModeratorContext,
|
||||
RoleResult,
|
||||
StartStep,
|
||||
WorkflowDefinition,
|
||||
WorkflowMessage,
|
||||
} from "@uncaged/nerve-core";
|
||||
import { END } from "@uncaged/nerve-core";
|
||||
import {
|
||||
isDryRun,
|
||||
llmExtract,
|
||||
nerveAgentContext,
|
||||
readNerveYaml,
|
||||
spawnSafe,
|
||||
} from "@uncaged/nerve-workflow-utils";
|
||||
import { join } from "node:path";
|
||||
import { z } from "zod";
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
|
||||
|
||||
/** unified diff 写入 meta 前的最大字符数(超出则截断并在 content 中说明) */
|
||||
const DIFF_TEXT_MAX_CHARS = 1_500_000;
|
||||
/** 送给分析模型的 diff 前缀长度上限 */
|
||||
const DIFF_LLM_MAX_CHARS = 100_000;
|
||||
|
||||
type PrSummarizerMeta = {
|
||||
fetcher: {
|
||||
prUrl: string | null;
|
||||
owner: string | null;
|
||||
repo: string | null;
|
||||
prIndex: number | null;
|
||||
giteaBaseUrl: string | null;
|
||||
title: string | null;
|
||||
state: string | null;
|
||||
diffText: string | null;
|
||||
diffByteLength: number | null;
|
||||
httpStatus: number | null;
|
||||
errorMessage: string | null;
|
||||
};
|
||||
analyzer: {
|
||||
analysisMarkdown: string | null;
|
||||
providerModel: string | null;
|
||||
errorMessage: string | null;
|
||||
};
|
||||
writer: {
|
||||
summaryZhMarkdown: string | null;
|
||||
errorMessage: string | null;
|
||||
};
|
||||
};
|
||||
|
||||
const jsonPromptSchema = z.object({
|
||||
prUrl: z.string().nullish(),
|
||||
owner: z.string().nullish(),
|
||||
repo: z.string().nullish(),
|
||||
index: z.number().int().positive().nullish(),
|
||||
baseUrl: z.string().nullish(),
|
||||
});
|
||||
|
||||
const analysisExtractSchema = z
|
||||
.object({
|
||||
analysisMarkdown: z.string().describe("Technical PR analysis in Markdown (can be English)."),
|
||||
})
|
||||
.describe("Structured PR analysis from the diff.");
|
||||
|
||||
const summaryExtractSchema = z
|
||||
.object({
|
||||
summaryZhMarkdown: z
|
||||
.string()
|
||||
.describe(
|
||||
"Final deliverable: Chinese Markdown with title, key changes, risks, and test suggestions.",
|
||||
),
|
||||
})
|
||||
.describe("Chinese Markdown PR summary.");
|
||||
|
||||
function getNerveYaml(): string {
|
||||
const result = readNerveYaml({ nerveRoot: NERVE_ROOT });
|
||||
return result.ok ? result.value : "# nerve.yaml unavailable";
|
||||
}
|
||||
|
||||
async function cfgGet(key: string): Promise<string | null> {
|
||||
const result = await spawnSafe("cfg", ["get", key], {
|
||||
cwd: NERVE_ROOT,
|
||||
env: null,
|
||||
timeoutMs: 10_000,
|
||||
});
|
||||
if (!result.ok) {
|
||||
return null;
|
||||
}
|
||||
return result.value.stdout.trim() || null;
|
||||
}
|
||||
|
||||
async function resolveDashScopeProvider(): Promise<{
|
||||
baseUrl: string;
|
||||
apiKey: string;
|
||||
model: string;
|
||||
} | null> {
|
||||
const apiKey = process.env.DASHSCOPE_API_KEY ?? (await cfgGet("DASHSCOPE_API_KEY"));
|
||||
const baseUrl = process.env.DASHSCOPE_BASE_URL ?? (await cfgGet("DASHSCOPE_BASE_URL"));
|
||||
const model =
|
||||
process.env.DASHSCOPE_MODEL ?? (await cfgGet("DASHSCOPE_MODEL")) ?? "qwen-plus";
|
||||
if (!apiKey || !baseUrl) {
|
||||
return null;
|
||||
}
|
||||
return { apiKey, baseUrl, model };
|
||||
}
|
||||
|
||||
function parseGiteaPullUrl(raw: string): {
|
||||
giteaBaseUrl: string;
|
||||
owner: string;
|
||||
repo: string;
|
||||
prIndex: number;
|
||||
prUrl: string;
|
||||
} | null {
|
||||
let u: URL;
|
||||
try {
|
||||
u = new URL(raw.trim());
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
if (u.protocol !== "http:" && u.protocol !== "https:") {
|
||||
return null;
|
||||
}
|
||||
const parts = u.pathname.replace(/\/+$/, "").split("/").filter(Boolean);
|
||||
const pullsAt = parts.indexOf("pulls");
|
||||
if (pullsAt < 2 || pullsAt + 1 >= parts.length) {
|
||||
return null;
|
||||
}
|
||||
const indexStr = parts[pullsAt + 1];
|
||||
if (!indexStr || !/^\d+$/.test(indexStr)) {
|
||||
return null;
|
||||
}
|
||||
const owner = parts[pullsAt - 2];
|
||||
const repo = parts[pullsAt - 1];
|
||||
if (!owner || !repo) {
|
||||
return null;
|
||||
}
|
||||
const prIndex = Number.parseInt(indexStr, 10);
|
||||
if (!Number.isFinite(prIndex) || prIndex < 1) {
|
||||
return null;
|
||||
}
|
||||
const giteaBaseUrl = `${u.protocol}//${u.host}`;
|
||||
return { giteaBaseUrl, owner, repo, prIndex, prUrl: raw.trim() };
|
||||
}
|
||||
|
||||
type ResolvedPr = {
|
||||
prUrl: string | null;
|
||||
owner: string | null;
|
||||
repo: string | null;
|
||||
prIndex: number | null;
|
||||
giteaBaseUrl: string | null;
|
||||
parseError: string | null;
|
||||
};
|
||||
|
||||
/**
 * Resolve a PR reference from the trigger prompt text.
 *
 * Two input forms are accepted:
 *  - a JSON object (prompt starting with "{") matching jsonPromptSchema,
 *    where explicit fields take precedence over fields derived from prUrl;
 *  - a bare Gitea pull-request URL.
 *
 * Never throws: all failures are reported via the parseError field, with
 * every other field null.
 */
function resolvePrFromContent(content: string): ResolvedPr {
  // Base result: all-null fields, used for every error path.
  const empty: ResolvedPr = {
    prUrl: null,
    owner: null,
    repo: null,
    prIndex: null,
    giteaBaseUrl: null,
    parseError: null,
  };
  const trimmed = content.trim();
  if (!trimmed) {
    return { ...empty, parseError: "Empty prompt" };
  }

  if (trimmed.startsWith("{")) {
    let parsed: unknown;
    try {
      parsed = JSON.parse(trimmed) as unknown;
    } catch {
      return { ...empty, parseError: "Invalid JSON in prompt" };
    }
    const row = jsonPromptSchema.safeParse(parsed);
    if (!row.success) {
      return { ...empty, parseError: `JSON validation failed: ${row.error.message}` };
    }
    const j = row.data;
    // Start from the explicit JSON fields; nullish values may be filled
    // in from a parsed prUrl below.
    let owner: string | null = j.owner ?? null;
    let repo: string | null = j.repo ?? null;
    let prIndex: number | null = j.index ?? null;
    let giteaBaseUrl: string | null = j.baseUrl ?? null;
    let prUrl: string | null = j.prUrl ?? null;

    if (j.prUrl) {
      const p = parseGiteaPullUrl(j.prUrl);
      if (p) {
        // Explicit fields win; URL-derived values only fill gaps.
        owner = owner ?? p.owner;
        repo = repo ?? p.repo;
        prIndex = prIndex ?? p.prIndex;
        giteaBaseUrl = giteaBaseUrl ?? p.giteaBaseUrl;
        prUrl = prUrl ?? p.prUrl;
      }
    }

    if (owner && repo && prIndex !== null && giteaBaseUrl) {
      // Rebuild a canonical URL when the prompt did not supply one.
      const normalizedBase = giteaBaseUrl.replace(/\/+$/, "");
      const builtUrl = `${normalizedBase}/${owner}/${repo}/pulls/${prIndex}`;
      return {
        prUrl: prUrl ?? builtUrl,
        owner,
        repo,
        prIndex,
        giteaBaseUrl: normalizedBase,
        parseError: null,
      };
    }
    return {
      ...empty,
      parseError: "JSON prompt must include resolvable owner, repo, pr index, and baseUrl (or prUrl)",
    };
  }

  // Non-JSON prompt: must be a bare Gitea pull-request URL.
  const p = parseGiteaPullUrl(trimmed);
  if (!p) {
    return {
      ...empty,
      parseError: "Not a valid Gitea PR URL (expected https://host/owner/repo/pulls/NUMBER)",
    };
  }
  return {
    prUrl: p.prUrl,
    owner: p.owner,
    repo: p.repo,
    prIndex: p.prIndex,
    giteaBaseUrl: p.giteaBaseUrl.replace(/\/+$/, ""),
    parseError: null,
  };
}
|
||||
|
||||
function emptyFetcherMeta(): PrSummarizerMeta["fetcher"] {
|
||||
return {
|
||||
prUrl: null,
|
||||
owner: null,
|
||||
repo: null,
|
||||
prIndex: null,
|
||||
giteaBaseUrl: null,
|
||||
title: null,
|
||||
state: null,
|
||||
diffText: null,
|
||||
diffByteLength: null,
|
||||
httpStatus: null,
|
||||
errorMessage: null,
|
||||
};
|
||||
}
|
||||
|
||||
const workflow: WorkflowDefinition<PrSummarizerMeta> = {
|
||||
name: "pr-summarizer",
|
||||
|
||||
roles: {
|
||||
async fetcher(start: StartStep): Promise<RoleResult<PrSummarizerMeta["fetcher"]>> {
|
||||
const resolved = resolvePrFromContent(start.content);
|
||||
if (resolved.parseError !== null) {
|
||||
const meta: PrSummarizerMeta["fetcher"] = {
|
||||
...emptyFetcherMeta(),
|
||||
errorMessage: resolved.parseError,
|
||||
};
|
||||
return { content: `Fetcher: parse error — ${resolved.parseError}`, meta };
|
||||
}
|
||||
|
||||
const token = process.env.GITEA_TOKEN ?? null;
|
||||
if (!token || token.trim() === "") {
|
||||
const meta: PrSummarizerMeta["fetcher"] = {
|
||||
...emptyFetcherMeta(),
|
||||
prUrl: resolved.prUrl,
|
||||
owner: resolved.owner,
|
||||
repo: resolved.repo,
|
||||
prIndex: resolved.prIndex,
|
||||
giteaBaseUrl: resolved.giteaBaseUrl,
|
||||
errorMessage: "GITEA_TOKEN is not set",
|
||||
};
|
||||
return { content: "Fetcher: missing GITEA_TOKEN (set env before running).", meta };
|
||||
}
|
||||
|
||||
const apiRoot = `${resolved.giteaBaseUrl}/api/v1`;
|
||||
const pullJsonUrl = `${apiRoot}/repos/${resolved.owner}/${resolved.repo}/pulls/${resolved.prIndex}`;
|
||||
const pullDiffUrl = `${pullJsonUrl}.diff`;
|
||||
|
||||
const headersJson: Record<string, string> = {
|
||||
Authorization: `token ${token}`,
|
||||
Accept: "application/json",
|
||||
};
|
||||
|
||||
let title: string | null = null;
|
||||
let state: string | null = null;
|
||||
let httpStatus: number | null = null;
|
||||
let jsonError: string | null = null;
|
||||
|
||||
try {
|
||||
const prRes = await fetch(pullJsonUrl, { headers: headersJson });
|
||||
httpStatus = prRes.status;
|
||||
const bodyText = await prRes.text();
|
||||
if (!prRes.ok) {
|
||||
jsonError = `GET PR JSON failed: HTTP ${prRes.status} ${bodyText.slice(0, 500)}`;
|
||||
} else {
|
||||
const data = JSON.parse(bodyText) as Record<string, unknown>;
|
||||
const t = data.title;
|
||||
const s = data.state;
|
||||
title = typeof t === "string" ? t : null;
|
||||
state = typeof s === "string" ? s : null;
|
||||
}
|
||||
} catch (e) {
|
||||
jsonError = e instanceof Error ? e.message : String(e);
|
||||
}
|
||||
|
||||
let diffText: string | null = null;
|
||||
let diffByteLength: number | null = null;
|
||||
let diffError: string | null = jsonError;
|
||||
let diffCharTruncated = false;
|
||||
|
||||
if (jsonError === null) {
|
||||
try {
|
||||
const diffRes = await fetch(pullDiffUrl, {
|
||||
headers: {
|
||||
Authorization: `token ${token}`,
|
||||
Accept: "text/plain",
|
||||
},
|
||||
});
|
||||
httpStatus = diffRes.status;
|
||||
const rawDiff = await diffRes.text();
|
||||
if (!diffRes.ok) {
|
||||
diffError = `GET PR diff failed: HTTP ${diffRes.status} ${rawDiff.slice(0, 500)}`;
|
||||
} else {
|
||||
diffByteLength = Buffer.byteLength(rawDiff, "utf8");
|
||||
if (rawDiff.length > DIFF_TEXT_MAX_CHARS) {
|
||||
diffText = rawDiff.slice(0, DIFF_TEXT_MAX_CHARS);
|
||||
diffCharTruncated = true;
|
||||
diffError = null;
|
||||
} else {
|
||||
diffText = rawDiff;
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
diffError = e instanceof Error ? e.message : String(e);
|
||||
}
|
||||
}
|
||||
|
||||
const truncatedNote =
|
||||
diffCharTruncated && diffByteLength !== null
|
||||
? ` (diff truncated in meta to ${DIFF_TEXT_MAX_CHARS} chars; full byte length ${diffByteLength})`
|
||||
: "";
|
||||
|
||||
const meta: PrSummarizerMeta["fetcher"] = {
|
||||
prUrl: resolved.prUrl,
|
||||
owner: resolved.owner,
|
||||
repo: resolved.repo,
|
||||
prIndex: resolved.prIndex,
|
||||
giteaBaseUrl: resolved.giteaBaseUrl,
|
||||
title,
|
||||
state,
|
||||
diffText,
|
||||
diffByteLength,
|
||||
httpStatus,
|
||||
errorMessage: diffError,
|
||||
};
|
||||
|
||||
const content =
|
||||
diffError !== null
|
||||
? `Fetcher: ${resolved.owner}/${resolved.repo}#${resolved.prIndex} — failed. ${diffError}`
|
||||
: `Fetcher: ${resolved.owner}/${resolved.repo}#${resolved.prIndex} — ${title ?? "(no title)"} [${state ?? "?"}] diff bytes=${diffByteLength ?? 0} HTTP=${httpStatus ?? "?"}${truncatedNote}`;
|
||||
|
||||
return { content, meta };
|
||||
},
|
||||
|
||||
    /**
     * Step 2: produce a technical analysis of the fetched diff.
     * Falls back gracefully: skips with an explanatory message when the
     * fetcher failed, returns a stub on dryRun, and emits a static (no-LLM)
     * summary when DashScope credentials are unavailable.
     */
    async analyzer(
      start: StartStep,
      messages: WorkflowMessage[],
    ): Promise<RoleResult<PrSummarizerMeta["analyzer"]>> {
      const last = messages[messages.length - 1];
      // Trusts the moderator ordering: the previous message is the fetcher's.
      const fm = last.meta as PrSummarizerMeta["fetcher"];

      // Uniform "skipped" result carrying the reason in both content and meta.
      // NOTE(review): the reason appears twice in content — looks duplicated;
      // confirm whether the second occurrence is intentional.
      const skip = (reason: string): RoleResult<PrSummarizerMeta["analyzer"]> => ({
        content: `Analyzer skipped: ${reason}\n\n${reason}`,
        meta: {
          analysisMarkdown: `## 无法分析\n\n${reason}`,
          providerModel: null,
          errorMessage: reason,
        },
      });

      if (last.role !== "fetcher") {
        return skip("上一则消息不是 fetcher 输出");
      }

      if (fm.errorMessage !== null) {
        return skip(`拉取阶段失败: ${fm.errorMessage}`);
      }

      const diff = fm.diffText;
      if (diff === null || diff.length === 0) {
        return skip("diff 为空,无法分析");
      }

      // dryRun: stub result, no LLM call.
      if (isDryRun(start)) {
        return {
          content: "[dryRun] Analyzer skipped real LLM call.",
          meta: {
            analysisMarkdown: "## dryRun\n\n未调用模型。",
            providerModel: null,
            errorMessage: null,
          },
        };
      }

      const provider = await resolveDashScopeProvider();
      if (provider === null) {
        // No credentials: produce a static summary from the diff head instead
        // of failing the workflow.
        const excerpt = diff.split("\n").slice(0, 80).join("\n");
        const analysisMarkdown =
          `## 静态摘要(无 LLM 凭据)\n\n` +
          `- 仓库: ${fm.owner}/${fm.repo} PR #${fm.prIndex}\n` +
          `- 标题: ${fm.title ?? "(null)"}\n` +
          `- diff 行数(近似): ${diff.split("\n").length}\n\n` +
          `### Diff 开头\n\n\`\`\`diff\n${excerpt}\n\`\`\`\n`;
        return {
          content: analysisMarkdown,
          meta: {
            analysisMarkdown,
            providerModel: null,
            errorMessage: null,
          },
        };
      }

      // Cap the diff sent to the model; note the truncation in the prompt.
      const diffForModel = diff.length > DIFF_LLM_MAX_CHARS ? diff.slice(0, DIFF_LLM_MAX_CHARS) : diff;
      const truncated = diff.length > DIFF_LLM_MAX_CHARS;

      const bundle =
        `Repository: ${fm.owner}/${fm.repo} PR index ${fm.prIndex}\n` +
        `Title: ${fm.title ?? ""}\n` +
        `State: ${fm.state ?? ""}\n` +
        (truncated ? `\n(diff truncated for model input to ${DIFF_LLM_MAX_CHARS} chars)\n` : "") +
        `\n--- unified diff ---\n${diffForModel}`;

      const extractPrompt =
        `${nerveAgentContext}\n\n` +
        `You are a senior reviewer. Analyze this Gitea pull request diff.\n` +
        `Output structured findings as Markdown: scope, files touched, behavior change, risks, test ideas.\n\n` +
        `Optional nerve.yaml context:\n\`\`\`yaml\n${getNerveYaml().slice(0, 4000)}\n\`\`\`\n\n` +
        `---\n${bundle}`;

      // dryRun: false is safe here — the dryRun case already returned above.
      const extracted = await llmExtract({
        text: extractPrompt,
        schema: analysisExtractSchema,
        provider,
        dryRun: false,
      });

      if (!extracted.ok) {
        const errText = JSON.stringify(extracted.error);
        return {
          content: `Analyzer LLM error: ${errText}`,
          meta: {
            analysisMarkdown: null,
            providerModel: provider.model,
            errorMessage: errText,
          },
        };
      }

      const analysisMarkdown = extracted.value.analysisMarkdown;
      return {
        content: analysisMarkdown,
        meta: {
          analysisMarkdown,
          providerModel: provider.model,
          errorMessage: null,
        },
      };
    },
|
||||
|
||||
    /**
     * Step 3: rewrite the analyzer's findings into the final Chinese Markdown
     * deliverable. Errors from upstream are surfaced, not papered over; a
     * dryRun or missing-credentials run produces a stub instead of an LLM call.
     */
    async writer(
      start: StartStep,
      messages: WorkflowMessage[],
    ): Promise<RoleResult<PrSummarizerMeta["writer"]>> {
      const last = messages[messages.length - 1];
      // Trusts the moderator ordering: the previous message is the analyzer's.
      const am = last.meta as PrSummarizerMeta["analyzer"];

      // Uniform error result: same Markdown in content and meta.
      const errOut = (msg: string): RoleResult<PrSummarizerMeta["writer"]> => ({
        content: `## 错误\n\n${msg}`,
        meta: {
          summaryZhMarkdown: `## 错误\n\n${msg}`,
          errorMessage: msg,
        },
      });

      if (last.role !== "analyzer") {
        return errOut("上一则消息不是 analyzer 输出,无法生成总结。");
      }

      if (am.errorMessage !== null) {
        return errOut(`分析阶段失败,未生成臆造总结:${am.errorMessage}`);
      }

      const analysis = am.analysisMarkdown;
      if (analysis === null || analysis.trim() === "") {
        return errOut("分析正文为空,无法生成中文总结。");
      }

      // dryRun: stub result, no LLM call.
      if (isDryRun(start)) {
        const stub = "## dryRun\n\n未调用模型生成中文总结。";
        return {
          content: stub,
          meta: { summaryZhMarkdown: stub, errorMessage: null },
        };
      }

      const provider = await resolveDashScopeProvider();
      if (provider === null) {
        // No credentials: pass through (a bounded excerpt of) the analysis.
        const stub =
          `## 中文摘要(无 LLM)\n\n` +
          `以下为上游分析原文摘录,请配置 DASHSCOPE 相关凭据以生成压缩中文总结。\n\n${analysis.slice(0, 8000)}`;
        return {
          content: stub,
          meta: { summaryZhMarkdown: stub, errorMessage: null },
        };
      }

      const writerPrompt =
        `将下列 PR 技术分析改写为**中文 Markdown**交付物,包含:\n` +
        `- 标题(含仓库与 PR 编号)\n` +
        `- 变更要点(条列)\n` +
        `- 风险与注意事项\n` +
        `- 测试建议\n\n` +
        `---\n${analysis}`;

      // dryRun: false is safe here — the dryRun case already returned above.
      const extracted = await llmExtract({
        text: writerPrompt,
        schema: summaryExtractSchema,
        provider,
        dryRun: false,
      });

      if (!extracted.ok) {
        const msg = JSON.stringify(extracted.error);
        return errOut(`Writer LLM 失败: ${msg}`);
      }

      const summaryZhMarkdown = extracted.value.summaryZhMarkdown;
      return {
        content: summaryZhMarkdown,
        meta: {
          summaryZhMarkdown,
          errorMessage: null,
        },
      };
    },
|
||||
},
|
||||
|
||||
moderator(context: ModeratorContext<PrSummarizerMeta>) {
|
||||
if (context.steps.length === 0) {
|
||||
return "fetcher";
|
||||
}
|
||||
const signal = context.steps[context.steps.length - 1];
|
||||
if (signal.role === "fetcher") {
|
||||
return "analyzer";
|
||||
}
|
||||
if (signal.role === "analyzer") {
|
||||
return "writer";
|
||||
}
|
||||
if (signal.role === "writer") {
|
||||
return END;
|
||||
}
|
||||
return END;
|
||||
},
|
||||
};
|
||||
|
||||
export default workflow;
|
||||
21
workflows/pr-summarizer/package.json
Normal file
21
workflows/pr-summarizer/package.json
Normal file
@ -0,0 +1,21 @@
|
||||
{
|
||||
"name": "pr-summarizer-workflow",
|
||||
"version": "0.0.1",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"dependencies": {
|
||||
"@uncaged/nerve-core": "latest",
|
||||
"@uncaged/nerve-workflow-utils": "latest",
|
||||
"zod": "^4.3.6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.0.0"
|
||||
},
|
||||
"pnpm": {
|
||||
"overrides": {
|
||||
"@uncaged/nerve-daemon": "link:../../../repos/nerve/packages/daemon",
|
||||
"@uncaged/nerve-core": "link:../../../repos/nerve/packages/core",
|
||||
"@uncaged/nerve-workflow-utils": "link:../../../repos/nerve/packages/workflow-utils"
|
||||
}
|
||||
}
|
||||
}
|
||||
49
workflows/pr-summarizer/pnpm-lock.yaml
generated
Normal file
49
workflows/pr-summarizer/pnpm-lock.yaml
generated
Normal file
@ -0,0 +1,49 @@
|
||||
lockfileVersion: '9.0'
|
||||
|
||||
settings:
|
||||
autoInstallPeers: true
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
overrides:
|
||||
'@uncaged/nerve-daemon': link:../../../repos/nerve/packages/daemon
|
||||
'@uncaged/nerve-core': link:../../../repos/nerve/packages/core
|
||||
'@uncaged/nerve-workflow-utils': link:../../../repos/nerve/packages/workflow-utils
|
||||
|
||||
importers:
|
||||
|
||||
.:
|
||||
dependencies:
|
||||
'@uncaged/nerve-core':
|
||||
specifier: link:../../../repos/nerve/packages/core
|
||||
version: link:../../../repos/nerve/packages/core
|
||||
'@uncaged/nerve-workflow-utils':
|
||||
specifier: link:../../../repos/nerve/packages/workflow-utils
|
||||
version: link:../../../repos/nerve/packages/workflow-utils
|
||||
zod:
|
||||
specifier: ^4.3.6
|
||||
version: 4.3.6
|
||||
devDependencies:
|
||||
'@types/node':
|
||||
specifier: ^22.0.0
|
||||
version: 22.19.17
|
||||
|
||||
packages:
|
||||
|
||||
'@types/node@22.19.17':
|
||||
resolution: {integrity: sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==}
|
||||
|
||||
undici-types@6.21.0:
|
||||
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
|
||||
|
||||
zod@4.3.6:
|
||||
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
|
||||
|
||||
snapshots:
|
||||
|
||||
'@types/node@22.19.17':
|
||||
dependencies:
|
||||
undici-types: 6.21.0
|
||||
|
||||
undici-types@6.21.0: {}
|
||||
|
||||
zod@4.3.6: {}
|
||||
13
workflows/pr-summarizer/tsconfig.json
Normal file
13
workflows/pr-summarizer/tsconfig.json
Normal file
@ -0,0 +1,13 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2022",
|
||||
"lib": ["ES2022"],
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"strict": true,
|
||||
"skipLibCheck": true,
|
||||
"noEmit": true,
|
||||
"types": ["node"]
|
||||
},
|
||||
"include": ["./**/*.ts"]
|
||||
}
|
||||
@ -1,43 +1,96 @@
|
||||
import type { WorkflowDefinition } from "@uncaged/nerve-daemon";
|
||||
import { execSync } from "node:child_process";
|
||||
import { readFileSync, existsSync } from "node:fs";
|
||||
import type {
|
||||
RoleResult,
|
||||
StartStep,
|
||||
WorkflowDefinition,
|
||||
WorkflowMessage,
|
||||
} from "@uncaged/nerve-core";
|
||||
import { END } from "@uncaged/nerve-core";
|
||||
import type { SpawnError } from "@uncaged/nerve-workflow-utils";
|
||||
import {
|
||||
cursorAgent,
|
||||
llmExtract,
|
||||
nerveAgentContext,
|
||||
readNerveYaml,
|
||||
spawnSafe,
|
||||
} from "@uncaged/nerve-workflow-utils";
|
||||
import { existsSync, readFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { z } from "zod";
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
|
||||
const SENSES_DIR = join(NERVE_ROOT, "senses");
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function nerveCommandEnv(): NodeJS.ProcessEnv {
|
||||
const pnpmHome = join(HOME, ".local/share/pnpm");
|
||||
const npmUserBin = join(HOME, ".local/share/npm/bin");
|
||||
return {
|
||||
...process.env,
|
||||
PNPM_HOME: pnpmHome,
|
||||
PATH: `${npmUserBin}:${pnpmHome}:${process.env.PATH ?? ""}`,
|
||||
};
|
||||
function getNerveYaml(): string {
|
||||
const result = readNerveYaml({ nerveRoot: NERVE_ROOT });
|
||||
return result.ok ? result.value : "# nerve.yaml unavailable";
|
||||
}
|
||||
|
||||
function run(cmd: string, cwd?: string): string {
|
||||
return execSync(cmd, {
|
||||
encoding: "utf-8",
|
||||
cwd: cwd ?? NERVE_ROOT,
|
||||
timeout: 300_000,
|
||||
env: nerveCommandEnv(),
|
||||
}).trim();
|
||||
async function cfgGet(key: string): Promise<string | null> {
|
||||
const result = await spawnSafe("cfg", ["get", key], {
|
||||
cwd: NERVE_ROOT,
|
||||
env: null,
|
||||
timeoutMs: 10_000,
|
||||
});
|
||||
if (!result.ok) {
|
||||
return null;
|
||||
}
|
||||
return result.value.stdout.trim() || null;
|
||||
}
|
||||
|
||||
async function resolveDashScopeProvider(): Promise<{
|
||||
baseUrl: string;
|
||||
apiKey: string;
|
||||
model: string;
|
||||
} | null> {
|
||||
const apiKey = process.env.DASHSCOPE_API_KEY ?? (await cfgGet("DASHSCOPE_API_KEY"));
|
||||
const baseUrl = process.env.DASHSCOPE_BASE_URL ?? (await cfgGet("DASHSCOPE_BASE_URL"));
|
||||
const model =
|
||||
process.env.DASHSCOPE_MODEL ?? (await cfgGet("DASHSCOPE_MODEL")) ?? "qwen-plus";
|
||||
if (!apiKey || !baseUrl) {
|
||||
return null;
|
||||
}
|
||||
return { apiKey, baseUrl, model };
|
||||
}
|
||||
|
||||
function formatSpawnFailure(error: SpawnError): string {
|
||||
if (error.kind === "spawn_failed") {
|
||||
return error.message;
|
||||
}
|
||||
if (error.kind === "timeout") {
|
||||
return `timeout (stdout=${error.stdout.slice(0, 200)})`;
|
||||
}
|
||||
return `exit ${error.exitCode} stderr=${error.stderr.slice(0, 400)}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run the same checks the workflow used to ask Hermes to perform, but locally.
|
||||
* Hermes chat often returns UI prose instead of shell output, which caused false failures.
|
||||
*/
|
||||
function runSenseSmokeTest(senseName: string): { ok: boolean; log: string; reason: string } {
|
||||
async function runSenseSmokeTest(senseName: string): Promise<{ ok: boolean; log: string; reason: string }> {
|
||||
const logParts: string[] = [];
|
||||
try {
|
||||
const status = run("nerve status");
|
||||
|
||||
const runNerve = async (args: string[]): Promise<{ ok: true; out: string } | { ok: false; err: string }> => {
|
||||
const result = await spawnSafe("nerve", args, {
|
||||
cwd: NERVE_ROOT,
|
||||
env: null,
|
||||
timeoutMs: 300_000,
|
||||
});
|
||||
if (!result.ok) {
|
||||
return { ok: false, err: formatSpawnFailure(result.error) };
|
||||
}
|
||||
return { ok: true, out: result.value.stdout };
|
||||
};
|
||||
|
||||
const statusRun = await runNerve(["status"]);
|
||||
if (!statusRun.ok) {
|
||||
return {
|
||||
ok: false,
|
||||
log: `=== nerve status ===\nERROR: ${statusRun.err}`,
|
||||
reason: `Smoke test command failed: ${statusRun.err}`,
|
||||
};
|
||||
}
|
||||
const status = statusRun.out;
|
||||
logParts.push("=== nerve status ===\n" + status);
|
||||
if (!status.includes(senseName)) {
|
||||
return {
|
||||
@ -47,18 +100,29 @@ function runSenseSmokeTest(senseName: string): { ok: boolean; log: string; reaso
|
||||
};
|
||||
}
|
||||
|
||||
const triggerOut = run(`nerve sense trigger ${senseName}`);
|
||||
logParts.push("=== nerve sense trigger ===\n" + triggerOut);
|
||||
const triggerRun = await runNerve(["sense", "trigger", senseName]);
|
||||
if (!triggerRun.ok) {
|
||||
logParts.push(`=== nerve sense trigger ===\nERROR: ${triggerRun.err}`);
|
||||
return {
|
||||
ok: false,
|
||||
log: logParts.join("\n\n"),
|
||||
reason: `Smoke test command failed: ${triggerRun.err}`,
|
||||
};
|
||||
}
|
||||
logParts.push("=== nerve sense trigger ===\n" + triggerRun.out);
|
||||
|
||||
let lastQuery = "";
|
||||
for (let i = 0; i < 25; i++) {
|
||||
run("sleep 1");
|
||||
try {
|
||||
lastQuery = run(`nerve sense query ${senseName}`);
|
||||
} catch (e) {
|
||||
logParts.push(`=== nerve sense query (attempt ${i + 1}) ===\nERROR: ${String(e)}`);
|
||||
continue;
|
||||
const sleepR = await spawnSafe("sleep", ["1"], { cwd: NERVE_ROOT, env: null, timeoutMs: 10_000 });
|
||||
if (!sleepR.ok) {
|
||||
logParts.push(`=== sleep (attempt ${i + 1}) ===\nERROR: ${formatSpawnFailure(sleepR.error)}`);
|
||||
}
|
||||
|
||||
const queryRun = await runNerve(["sense", "query", senseName]);
|
||||
if (!queryRun.ok) {
|
||||
logParts.push(`=== nerve sense query (attempt ${i + 1}) ===\nERROR: ${queryRun.err}`);
|
||||
} else {
|
||||
lastQuery = queryRun.out;
|
||||
logParts.push(`=== nerve sense query (attempt ${i + 1}) ===\n${lastQuery}`);
|
||||
if (!lastQuery.includes("(0 rows)")) {
|
||||
return {
|
||||
@ -68,6 +132,7 @@ function runSenseSmokeTest(senseName: string): { ok: boolean; log: string; reaso
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
ok: false,
|
||||
@ -76,52 +141,6 @@ function runSenseSmokeTest(senseName: string): { ok: boolean; log: string; reaso
|
||||
? "Query still returned 0 rows after trigger (compute error, throttle drop, or DB not written)"
|
||||
: "Timed out waiting for successful sense query",
|
||||
};
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
return {
|
||||
ok: false,
|
||||
log: logParts.join("\n\n"),
|
||||
reason: `Smoke test command failed: ${msg}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Call a cheap LLM with tool_choice to extract structured metadata from text.
|
||||
* Uses DashScope (Alibaba Cloud, OpenAI-compatible) with qwen-plus.
|
||||
*/
|
||||
function llmExtract<T>(text: string, toolName: string, toolDescription: string, parameters: Record<string, unknown>): T {
|
||||
const apiKey = run("bash -c 'source ~/.profile && cfg get DASHSCOPE_API_KEY'");
|
||||
const baseUrl = run("bash -c 'source ~/.profile && cfg get DASHSCOPE_BASE_URL'");
|
||||
|
||||
const body = JSON.stringify({
|
||||
model: "qwen-plus",
|
||||
messages: [
|
||||
{ role: "system", content: "Extract the requested information from the provided text. Be precise." },
|
||||
{ role: "user", content: text },
|
||||
],
|
||||
tools: [{
|
||||
type: "function" as const,
|
||||
function: { name: toolName, description: toolDescription, parameters },
|
||||
}],
|
||||
tool_choice: { type: "function" as const, function: { name: toolName } },
|
||||
});
|
||||
|
||||
const escaped = body.replace(/'/g, "'\\''");
|
||||
const result = run(`curl -s '${baseUrl}/chat/completions' -H 'Authorization: Bearer ${apiKey}' -H 'Content-Type: application/json' -d '${escaped}'`);
|
||||
const parsed = JSON.parse(result);
|
||||
const toolCall = parsed.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments;
|
||||
if (!toolCall) throw new Error(`llmExtract failed: ${result.slice(0, 500)}`);
|
||||
return JSON.parse(toolCall) as T;
|
||||
}
|
||||
|
||||
function cursorAgent(prompt: string, mode: "plan" | "ask" | "default", cwd: string): string {
|
||||
const escaped = prompt.replace(/'/g, "'\\''");
|
||||
const modeFlag = mode === "plan" ? " --mode=plan" : mode === "ask" ? " --mode=ask" : "";
|
||||
return run(
|
||||
`cursor-agent -p '${escaped}' --model auto${modeFlag} --output-format text --trust --force`,
|
||||
cwd,
|
||||
);
|
||||
}
|
||||
|
||||
// Build context string with existing sense examples
|
||||
@ -151,27 +170,43 @@ function buildSenseExamples(): string {
|
||||
return examples.join("\n\n---\n\n");
|
||||
}
|
||||
|
||||
// Read current nerve.yaml
|
||||
function readNerveYaml(): string {
|
||||
return readFileSync(join(NERVE_ROOT, "nerve.yaml"), "utf-8");
|
||||
type SenseMeta = {
|
||||
planner: { plan: string; senseName: string; userInput: string };
|
||||
coder: { senseName: string; files: Record<string, boolean>; cursorOutput: string };
|
||||
tester: { passed: boolean; senseName: string; reason: string; attempt: number };
|
||||
};
|
||||
|
||||
const senseMetaSchema = z
|
||||
.object({
|
||||
name: z.string().describe("kebab-case sense name, e.g. 'disk-usage'"),
|
||||
description: z.string().describe("One-line description of what this sense monitors"),
|
||||
})
|
||||
.describe("Extract the sense name and a one-line description from the plan");
|
||||
|
||||
const workflow: WorkflowDefinition<SenseMeta> = {
|
||||
name: "sense-generator",
|
||||
|
||||
roles: {
|
||||
async planner(
|
||||
start: StartStep,
|
||||
_messages: WorkflowMessage[],
|
||||
): Promise<RoleResult<SenseMeta["planner"]>> {
|
||||
const userInput = start.content;
|
||||
|
||||
const provider = await resolveDashScopeProvider();
|
||||
if (provider === null) {
|
||||
return {
|
||||
content:
|
||||
"Cannot run planner: set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or configure via `cfg get`), " +
|
||||
"and optionally DASHSCOPE_MODEL.",
|
||||
meta: { plan: "", senseName: "", userInput },
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Workflow Definition
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const workflow: WorkflowDefinition = {
|
||||
roles: {
|
||||
// -----------------------------------------------------------------------
|
||||
// PLANNER: Generates a structured plan for the sense
|
||||
// -----------------------------------------------------------------------
|
||||
planner: {
|
||||
async execute(prompt: unknown, ctx) {
|
||||
const userInput = String(prompt);
|
||||
ctx.log(`planner: designing sense from input: "${userInput.substring(0, 100)}"`);
|
||||
|
||||
const planPrompt = `You are planning a new Nerve sense.
|
||||
|
||||
${nerveAgentContext}
|
||||
|
||||
User request: ${userInput}
|
||||
Pick a good kebab-case name for this sense.
|
||||
|
||||
@ -200,45 +235,50 @@ ${buildSenseExamples()}
|
||||
|
||||
Current nerve.yaml:
|
||||
\`\`\`yaml
|
||||
${readNerveYaml()}
|
||||
${getNerveYaml()}
|
||||
\`\`\`
|
||||
|
||||
Output ONLY the plan in markdown. Be precise and implementation-ready.`;
|
||||
|
||||
const plan = cursorAgent(planPrompt, "ask", NERVE_ROOT);
|
||||
ctx.log(`planner: plan generated (${plan.length} chars)`);
|
||||
|
||||
// Extract sense metadata from plan using structured LLM call
|
||||
const meta = llmExtract<{ name: string; description: string }>(
|
||||
plan,
|
||||
"extract_sense_metadata",
|
||||
"Extract the sense name and a one-line description from the plan",
|
||||
{
|
||||
type: "object",
|
||||
properties: {
|
||||
name: { type: "string", description: "kebab-case sense name, e.g. 'disk-usage'" },
|
||||
description: { type: "string", description: "One-line description of what this sense monitors" },
|
||||
},
|
||||
required: ["name", "description"],
|
||||
},
|
||||
);
|
||||
const senseName = meta.name;
|
||||
ctx.log(`planner: extracted sense name="${senseName}", desc="${meta.description}"`);
|
||||
|
||||
return { type: "plan_ready", plan, senseName, userInput };
|
||||
},
|
||||
},
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// CODER: Generates sense files + updates nerve.yaml
|
||||
// -----------------------------------------------------------------------
|
||||
coder: {
|
||||
async execute(prompt: unknown, ctx) {
|
||||
const { plan, senseName } = prompt as {
|
||||
plan: string;
|
||||
senseName: string;
|
||||
const planResult = await cursorAgent({
|
||||
prompt: planPrompt,
|
||||
mode: "ask",
|
||||
cwd: NERVE_ROOT,
|
||||
env: null,
|
||||
timeoutMs: null,
|
||||
});
|
||||
if (!planResult.ok) {
|
||||
return {
|
||||
content: `cursor-agent failed: ${formatSpawnFailure(planResult.error)}`,
|
||||
meta: { plan: "", senseName: "", userInput },
|
||||
};
|
||||
ctx.log(`coder: implementing sense "${senseName}"`);
|
||||
}
|
||||
const plan = planResult.value;
|
||||
|
||||
const extracted = await llmExtract({
|
||||
text: plan,
|
||||
schema: senseMetaSchema,
|
||||
provider,
|
||||
});
|
||||
if (!extracted.ok) {
|
||||
return {
|
||||
content: `${plan}\n\n[llmExtract error] ${JSON.stringify(extracted.error)}`,
|
||||
meta: { plan, senseName: "", userInput },
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
content: plan,
|
||||
meta: { plan, senseName: extracted.value.name, userInput },
|
||||
};
|
||||
},
|
||||
|
||||
async coder(
|
||||
_start: StartStep,
|
||||
messages: WorkflowMessage[],
|
||||
): Promise<RoleResult<SenseMeta["coder"]>> {
|
||||
const last = messages[messages.length - 1];
|
||||
const { plan, senseName } = last.meta as { plan: string; senseName: string };
|
||||
|
||||
const codePrompt = `You are implementing a new Nerve sense called "${senseName}" in the directory ${SENSES_DIR}/${senseName}/.
|
||||
|
||||
@ -260,7 +300,7 @@ ${buildSenseExamples()}
|
||||
|
||||
Current nerve.yaml (append to it, don't overwrite existing entries):
|
||||
\`\`\`yaml
|
||||
${readNerveYaml()}
|
||||
${getNerveYaml()}
|
||||
\`\`\`
|
||||
|
||||
IMPORTANT RULES:
|
||||
@ -273,10 +313,26 @@ IMPORTANT RULES:
|
||||
|
||||
Create all files now.`;
|
||||
|
||||
const result = cursorAgent(codePrompt, "default", NERVE_ROOT);
|
||||
ctx.log(`coder: implementation done`);
|
||||
const agentResult = await cursorAgent({
|
||||
prompt: codePrompt,
|
||||
mode: "default",
|
||||
cwd: NERVE_ROOT,
|
||||
env: null,
|
||||
timeoutMs: null,
|
||||
});
|
||||
if (!agentResult.ok) {
|
||||
const resultText = `cursor-agent failed: ${formatSpawnFailure(agentResult.error)}`;
|
||||
return {
|
||||
content: resultText,
|
||||
meta: {
|
||||
senseName,
|
||||
files: { index: false, schema: false, migration: false },
|
||||
cursorOutput: resultText,
|
||||
},
|
||||
};
|
||||
}
|
||||
const result = agentResult.value;
|
||||
|
||||
// Verify files were created
|
||||
const senseDir = join(SENSES_DIR, senseName);
|
||||
const files = {
|
||||
index: existsSync(join(senseDir, "index.js")),
|
||||
@ -284,110 +340,76 @@ Create all files now.`;
|
||||
migration: existsSync(join(senseDir, "migrations", "0001_init.sql")),
|
||||
};
|
||||
|
||||
ctx.log(`coder: files created — index:${files.index} schema:${files.schema} migration:${files.migration}`);
|
||||
|
||||
return {
|
||||
type: "code_ready",
|
||||
senseName,
|
||||
files,
|
||||
cursorOutput: result,
|
||||
content: result,
|
||||
meta: { senseName, files, cursorOutput: result },
|
||||
};
|
||||
},
|
||||
},
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TESTER: Triggers the sense and validates the result
|
||||
// -----------------------------------------------------------------------
|
||||
tester: {
|
||||
async execute(prompt: unknown, ctx) {
|
||||
const { senseName, files, attempt = 1 } = prompt as {
|
||||
senseName: string;
|
||||
files: Record<string, boolean>;
|
||||
attempt?: number;
|
||||
};
|
||||
ctx.log(`tester: validating sense "${senseName}" (attempt ${attempt})`);
|
||||
async tester(
|
||||
_start: StartStep,
|
||||
messages: WorkflowMessage[],
|
||||
): Promise<RoleResult<SenseMeta["tester"]>> {
|
||||
const last = messages[messages.length - 1];
|
||||
const { senseName, files } = last.meta as { senseName: string; files: Record<string, boolean> };
|
||||
|
||||
const attempt = messages.filter((m) => m.role === "tester").length + 1;
|
||||
|
||||
// Check all files exist
|
||||
const missing = Object.entries(files).filter(([, v]) => !v).map(([k]) => k);
|
||||
if (missing.length > 0) {
|
||||
ctx.log(`tester: FAIL — missing files: ${missing.join(", ")}`);
|
||||
return {
|
||||
type: "test_failed",
|
||||
senseName,
|
||||
reason: `Missing files: ${missing.join(", ")}`,
|
||||
attempt,
|
||||
content: `FAIL — missing files: ${missing.join(", ")}`,
|
||||
meta: { passed: false, senseName, reason: `Missing files: ${missing.join(", ")}`, attempt },
|
||||
};
|
||||
}
|
||||
|
||||
const smoke = runSenseSmokeTest(senseName);
|
||||
ctx.log(`tester: smoke — ok=${smoke.ok}, reason="${smoke.reason}"`);
|
||||
ctx.log(`tester: log head — ${smoke.log.substring(0, 400)}`);
|
||||
const smoke = await runSenseSmokeTest(senseName);
|
||||
|
||||
if (smoke.ok) {
|
||||
return { type: "test_passed", senseName, result: smoke.reason };
|
||||
return {
|
||||
content: `PASS — ${smoke.reason}`,
|
||||
meta: { passed: true, senseName, reason: smoke.reason, attempt },
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
type: "test_failed",
|
||||
content: `FAIL — ${smoke.reason}`,
|
||||
meta: {
|
||||
passed: false,
|
||||
senseName,
|
||||
reason: `${smoke.reason}\n\n--- smoke log ---\n${smoke.log}`,
|
||||
attempt,
|
||||
},
|
||||
};
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// MODERATOR: Routes the workflow through planner → coder → tester
|
||||
// -------------------------------------------------------------------------
|
||||
moderate(thread, event) {
|
||||
// Initial trigger
|
||||
if (event.type === "thread_start") {
|
||||
return { role: "planner", prompt: event.triggerPayload ?? "" };
|
||||
moderator(context) {
|
||||
if (context.steps.length === 0) {
|
||||
return "planner";
|
||||
}
|
||||
|
||||
// Plan is ready → hand to coder
|
||||
if (event.type === "plan_ready") {
|
||||
return {
|
||||
role: "coder",
|
||||
prompt: { plan: event.plan, senseName: event.senseName },
|
||||
};
|
||||
const signal = context.steps[context.steps.length - 1];
|
||||
if (signal.role === "planner") {
|
||||
return "coder";
|
||||
}
|
||||
|
||||
// Code is ready → hand to tester
|
||||
if (event.type === "code_ready") {
|
||||
return {
|
||||
role: "tester",
|
||||
prompt: { senseName: event.senseName, files: event.files },
|
||||
};
|
||||
if (signal.role === "coder") {
|
||||
return "tester";
|
||||
}
|
||||
|
||||
// Test failed → retry coder (max 2 retries)
|
||||
if (event.type === "test_failed") {
|
||||
const attempt = (event.attempt as number) ?? 1;
|
||||
if (attempt < 3) {
|
||||
// Find the plan from history
|
||||
const planEvent = thread.events.find((e) => e.type === "plan_ready");
|
||||
if (planEvent) {
|
||||
return {
|
||||
role: "coder",
|
||||
prompt: {
|
||||
plan: `${planEvent.plan}\n\n## PREVIOUS FAILURE (attempt ${attempt}):\n${event.reason}\n\nFix the issues above.`,
|
||||
senseName: event.senseName,
|
||||
},
|
||||
};
|
||||
if (signal.role === "tester") {
|
||||
const meta = signal.meta;
|
||||
if (meta.passed) {
|
||||
return END;
|
||||
}
|
||||
if (meta.attempt < 3) {
|
||||
return "coder";
|
||||
}
|
||||
// Give up after 3 attempts
|
||||
return null;
|
||||
return END;
|
||||
}
|
||||
|
||||
// Test passed → done
|
||||
if (event.type === "test_passed") {
|
||||
return null;
|
||||
}
|
||||
|
||||
return null;
|
||||
return END;
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@ -306,6 +306,20 @@ Output a thorough analysis in markdown. Do not write final implementation code.`
|
||||
provider,
|
||||
dryRun: dry,
|
||||
});
|
||||
if (dry) {
|
||||
return {
|
||||
content: "[dry-run] analyst complete",
|
||||
meta: {
|
||||
...empty,
|
||||
analysis: analysis || "(dry-run)",
|
||||
workflowName: "dry-run-test",
|
||||
roles: [{ name: "placeholder", description: "dry-run role", responsibilities: "n/a" }],
|
||||
moderatorFlow: "placeholder → END",
|
||||
externalDeps: "none",
|
||||
dataFlow: "n/a",
|
||||
},
|
||||
};
|
||||
}
|
||||
if (!extracted.ok) {
|
||||
return {
|
||||
content: `${analysis}\n\n[llmExtract error] ${JSON.stringify(extracted.error)}`,
|
||||
@ -356,6 +370,12 @@ Output a thorough analysis in markdown. Do not write final implementation code.`
|
||||
messages: WorkflowMessage[],
|
||||
): Promise<RoleResult<WorkflowGenMeta["architect"]>> {
|
||||
const dry = isDryRun(start);
|
||||
if (dry) {
|
||||
return {
|
||||
content: "[dry-run] architect complete",
|
||||
meta: { workflowName: "dry-run-test", design: "(dry-run design)" },
|
||||
};
|
||||
}
|
||||
const last = messages[messages.length - 1];
|
||||
const spec = last.meta as WorkflowGenMeta["analyst"];
|
||||
const wfName = spec.workflowName.trim();
|
||||
@ -467,6 +487,12 @@ Output ONLY the design markdown.`;
|
||||
messages: WorkflowMessage[],
|
||||
): Promise<RoleResult<WorkflowGenMeta["coder"]>> {
|
||||
const dry = isDryRun(start);
|
||||
if (dry) {
|
||||
return {
|
||||
content: "[dry-run] coder complete",
|
||||
meta: { workflowName: "dry-run-test", generatedFiles: ["(dry-run)"], codegenLog: "(dry-run)" },
|
||||
};
|
||||
}
|
||||
const analystMeta = lastMetaForRole<WorkflowGenMeta["analyst"]>(messages, "analyst");
|
||||
const architectMeta = lastMetaForRole<WorkflowGenMeta["architect"]>(messages, "architect");
|
||||
const priorReviewer = lastMetaForRole<WorkflowGenMeta["reviewer"]>(messages, "reviewer");
|
||||
@ -583,6 +609,12 @@ Implement now.`;
|
||||
messages: WorkflowMessage[],
|
||||
): Promise<RoleResult<WorkflowGenMeta["reviewer"]>> {
|
||||
const dry = isDryRun(start);
|
||||
if (dry) {
|
||||
return {
|
||||
content: "[dry-run] reviewer complete — LGTM",
|
||||
meta: { workflowName: "dry-run-test", approved: true, issues: "" },
|
||||
};
|
||||
}
|
||||
const last = messages[messages.length - 1];
|
||||
const { workflowName, files } = last.meta as WorkflowGenMeta["coder"];
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user