fix: restore CLI-triggered workflows, only remove restart-gateway
The previous commit incorrectly deleted all workflows. Only restart-gateway should be removed (replaced by direct shell trigger). Other workflows (solve-issue, extract-knowledge, develop-sense, develop-workflow) are CLI-triggered and independent of sense coupling.
This commit is contained in:
parent
c71212a0ce
commit
a4625a4559
14
nerve.yaml
14
nerve.yaml
@ -10,3 +10,17 @@ senses:
|
||||
# Hunk @ -10,3 +10,17 of nerve.yaml. The first three keys are the tail of an
# entry under `senses:`; their exact nesting depth is not visible in this
# diff — confirm against the full file before applying.
    interval: 2m
    throttle: 30s
    timeout: 30s

# CLI-triggered workflows restored by this commit. Each workflow runs at most
# one thread at a time; extra triggers are queued instead of dropped.
workflows:
  develop-sense:
    concurrency: 1
    overflow: queue
  develop-workflow:
    concurrency: 1
    overflow: queue
  solve-issue:
    concurrency: 1
    overflow: queue
  extract-knowledge:
    concurrency: 1
    overflow: queue
|
||||
|
||||
33
workflows/develop-sense/index.ts
Normal file
33
workflows/develop-sense/index.ts
Normal file
@ -0,0 +1,33 @@
|
||||
import { join } from "node:path";
|
||||
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
|
||||
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
|
||||
import { createDevelopSenseWorkflow } from "@uncaged/nerve-workflow-meta";
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
|
||||
|
||||
const apiKey = process.env.DASHSCOPE_API_KEY;
|
||||
const baseUrl = process.env.DASHSCOPE_BASE_URL;
|
||||
const model = process.env.DASHSCOPE_MODEL ?? "qwen-plus";
|
||||
if (!apiKey || !baseUrl) {
|
||||
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL");
|
||||
}
|
||||
|
||||
const CURSOR_TIMEOUT_MS = 300_000;
|
||||
|
||||
const workflow = createDevelopSenseWorkflow({
|
||||
defaultAdapter: hermesAdapter,
|
||||
adapters: {
|
||||
planner: createCursorAdapter({
|
||||
type: "cursor",
|
||||
mode: "ask",
|
||||
model: "auto",
|
||||
timeout: CURSOR_TIMEOUT_MS,
|
||||
}),
|
||||
coder: cursorAdapter,
|
||||
},
|
||||
extract: { provider: { apiKey, baseUrl, model } },
|
||||
cwd: NERVE_ROOT,
|
||||
});
|
||||
|
||||
export default workflow;
|
||||
34
workflows/develop-workflow/index.ts
Normal file
34
workflows/develop-workflow/index.ts
Normal file
@ -0,0 +1,34 @@
|
||||
import { join } from "node:path";
|
||||
import { createCursorAdapter, cursorAdapter } from "@uncaged/nerve-adapter-cursor";
|
||||
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
|
||||
import { createDevelopWorkflowWorkflow } from "@uncaged/nerve-workflow-meta";
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
|
||||
|
||||
const apiKey = process.env.DASHSCOPE_API_KEY;
|
||||
const baseUrl = process.env.DASHSCOPE_BASE_URL;
|
||||
const model = process.env.DASHSCOPE_MODEL ?? "qwen-plus";
|
||||
|
||||
if (!apiKey || !baseUrl) {
|
||||
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL");
|
||||
}
|
||||
|
||||
const CURSOR_TIMEOUT_MS = 300_000;
|
||||
|
||||
const workflow = createDevelopWorkflowWorkflow({
|
||||
defaultAdapter: hermesAdapter,
|
||||
adapters: {
|
||||
planner: createCursorAdapter({
|
||||
type: "cursor",
|
||||
mode: "ask",
|
||||
model: "auto",
|
||||
timeout: CURSOR_TIMEOUT_MS,
|
||||
}),
|
||||
coder: cursorAdapter,
|
||||
},
|
||||
extract: { provider: { apiKey, baseUrl, model } },
|
||||
nerveRoot: NERVE_ROOT,
|
||||
});
|
||||
|
||||
export default workflow;
|
||||
33
workflows/extract-knowledge/build.ts
Normal file
33
workflows/extract-knowledge/build.ts
Normal file
@ -0,0 +1,33 @@
|
||||
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createLlmAdapter } from "@uncaged/nerve-workflow-utils";
|
||||
|
||||
import { moderator } from "./moderator.js";
|
||||
import type { WorkflowMeta } from "./moderator.js";
|
||||
import { createAnswererRole } from "./roles/answerer.js";
|
||||
import { createExplorerRole } from "./roles/explorer.js";
|
||||
import { createQuestionerRole } from "./roles/questioner.js";
|
||||
|
||||
export type CreateKnowledgeExtractionDeps = {
|
||||
defaultAdapter: AgentFn;
|
||||
adapters?: Partial<Record<keyof WorkflowMeta, AgentFn>>;
|
||||
extract: LlmExtractorConfig;
|
||||
};
|
||||
|
||||
export function createKnowledgeExtractionWorkflow({
|
||||
defaultAdapter,
|
||||
adapters,
|
||||
extract,
|
||||
}: CreateKnowledgeExtractionDeps): WorkflowDefinition<WorkflowMeta> {
|
||||
const a = (role: keyof WorkflowMeta) => adapters?.[role] ?? defaultAdapter;
|
||||
const llmAdapter = createLlmAdapter(extract.provider);
|
||||
return {
|
||||
name: "extract-knowledge",
|
||||
roles: {
|
||||
questioner: createQuestionerRole(adapters?.questioner ?? llmAdapter, { extract }),
|
||||
answerer: createAnswererRole(adapters?.answerer ?? llmAdapter, { extract }),
|
||||
explorer: createExplorerRole(a("explorer"), { extract }),
|
||||
},
|
||||
moderator,
|
||||
};
|
||||
}
|
||||
30
workflows/extract-knowledge/index.ts
Normal file
30
workflows/extract-knowledge/index.ts
Normal file
@ -0,0 +1,30 @@
|
||||
import { join } from "node:path";
|
||||
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
|
||||
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
|
||||
import { createKnowledgeExtractionWorkflow } from "./build.js";
|
||||
import { resolveDashScopeProvider } from "../solve-issue/lib/provider.js";
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
|
||||
|
||||
const provider = await resolveDashScopeProvider(NERVE_ROOT);
|
||||
|
||||
if (provider === null) {
|
||||
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or cfg get equivalents)");
|
||||
}
|
||||
|
||||
const CURSOR_TIMEOUT_MS = 300_000;
|
||||
|
||||
const workflow = createKnowledgeExtractionWorkflow({
|
||||
defaultAdapter: hermesAdapter,
|
||||
adapters: {
|
||||
explorer: createCursorAdapter({
|
||||
type: "cursor",
|
||||
model: "claude-sonnet-4",
|
||||
timeout: CURSOR_TIMEOUT_MS,
|
||||
}),
|
||||
},
|
||||
extract: { provider },
|
||||
});
|
||||
|
||||
export default workflow;
|
||||
74
workflows/extract-knowledge/lib/knowledge-queue.ts
Normal file
74
workflows/extract-knowledge/lib/knowledge-queue.ts
Normal file
@ -0,0 +1,74 @@
|
||||
import type { Dirent } from "node:fs";
|
||||
import { readdir } from "node:fs/promises";
|
||||
import { join } from "node:path";
|
||||
|
||||
import type { StartStep, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
|
||||
import type { ExplorerMeta } from "../roles/explorer.js";
|
||||
import type { QuestionerMeta } from "../roles/questioner.js";
|
||||
|
||||
async function walkMarkdownFiles(rootDir: string, base: string): Promise<string[]> {
|
||||
const out: string[] = [];
|
||||
let entries: Dirent[];
|
||||
try {
|
||||
entries = (await readdir(rootDir, { withFileTypes: true })) as Dirent[];
|
||||
} catch {
|
||||
return out;
|
||||
}
|
||||
for (const e of entries) {
|
||||
const name = e.name;
|
||||
const rel = base ? `${base}/${name}` : name;
|
||||
const full = join(rootDir, name);
|
||||
if (e.isDirectory()) {
|
||||
out.push(...(await walkMarkdownFiles(full, rel)));
|
||||
} else if (e.isFile() && name.endsWith(".md")) {
|
||||
out.push(rel.replace(/\\/g, "/"));
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
/** Enumerate all markdown files under `.knowledge/` as repo-relative paths; seed line first if present. */
|
||||
export async function bootstrapKnowledgeQueue(cwd: string, startContent: string): Promise<string[]> {
|
||||
const knowledgeDir = join(cwd, ".knowledge");
|
||||
const relFiles = await walkMarkdownFiles(knowledgeDir, "");
|
||||
const paths = relFiles.map((f) => `.knowledge/${f}`);
|
||||
const seed = startContent.trim().split(/\r?\n/u)[0]?.trim() ?? "";
|
||||
if (paths.length === 0 && seed.length > 0) {
|
||||
return [seed];
|
||||
}
|
||||
if (seed.length > 0 && paths.includes(seed)) {
|
||||
return [seed, ...paths.filter((p) => p !== seed)];
|
||||
}
|
||||
if (seed.length > 0 && !paths.includes(seed)) {
|
||||
return [seed, ...paths];
|
||||
}
|
||||
return [...paths].sort();
|
||||
}
|
||||
|
||||
function lastIndexOfRole(messages: WorkflowMessage[], role: string): number {
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
if (messages[i].role === role) return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/** Next queue for questioner: bootstrap, or continue after answerer / explorer. */
|
||||
export async function resolveQueueForQuestioner(
|
||||
start: StartStep,
|
||||
messages: WorkflowMessage[],
|
||||
cwd: string,
|
||||
): Promise<string[]> {
|
||||
const lastQi = lastIndexOfRole(messages, "questioner");
|
||||
if (lastQi === -1) {
|
||||
return bootstrapKnowledgeQueue(cwd, start.content);
|
||||
}
|
||||
const qMeta = messages[lastQi].meta as QuestionerMeta;
|
||||
const tail = messages.slice(lastQi + 1);
|
||||
const explorerMsg = tail.find((m) => m.role === "explorer");
|
||||
if (explorerMsg) {
|
||||
const eMeta = explorerMsg.meta as ExplorerMeta;
|
||||
return [...qMeta.remaining_queue, ...eMeta.new_cards];
|
||||
}
|
||||
return qMeta.remaining_queue;
|
||||
}
|
||||
21
workflows/extract-knowledge/lib/workdir.ts
Normal file
21
workflows/extract-knowledge/lib/workdir.ts
Normal file
@ -0,0 +1,21 @@
|
||||
import type { StartStep } from "@uncaged/nerve-core";
|
||||
|
||||
type StartMetaWithWorkdir = StartStep["meta"] & { workdir?: string | null };
|
||||
|
||||
/**
|
||||
* Resolve the target repo working directory.
|
||||
* Priority: start.meta.workdir → prompt second line (if absolute path) → cwd.
|
||||
*/
|
||||
export function resolveWorkdir(start: StartStep): string {
|
||||
const m = start.meta as StartMetaWithWorkdir;
|
||||
if (m.workdir) return m.workdir;
|
||||
|
||||
// Allow prompt to carry workdir on the second line: "seed\n/abs/path"
|
||||
const lines = start.content.split(/\r?\n/);
|
||||
if (lines.length >= 2) {
|
||||
const candidate = lines[1]!.trim();
|
||||
if (candidate.startsWith("/")) return candidate;
|
||||
}
|
||||
|
||||
return process.cwd();
|
||||
}
|
||||
84
workflows/extract-knowledge/moderator.ts
Normal file
84
workflows/extract-knowledge/moderator.ts
Normal file
@ -0,0 +1,84 @@
|
||||
import { END } from "@uncaged/nerve-core";
|
||||
import type { Moderator, ThreadContext } from "@uncaged/nerve-core";
|
||||
|
||||
import type { AnswererMeta } from "./roles/answerer.js";
|
||||
import type { ExplorerMeta } from "./roles/explorer.js";
|
||||
import type { QuestionerMeta } from "./roles/questioner.js";
|
||||
|
||||
export type WorkflowMeta = {
|
||||
questioner: QuestionerMeta;
|
||||
answerer: AnswererMeta;
|
||||
explorer: ExplorerMeta;
|
||||
};
|
||||
|
||||
type Steps = ThreadContext<WorkflowMeta>["steps"];
|
||||
|
||||
function lastQuestionerRemaining(steps: Steps): QuestionerMeta | undefined {
|
||||
for (let i = steps.length - 1; i >= 0; i--) {
|
||||
const s = steps[i];
|
||||
if (s.role === "questioner") return s.meta;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/** End when the last two explorer invocations both added no new cards (issue #266 stagnation rule). */
|
||||
function lastTwoExplorerRunsBothEmpty(steps: Steps): boolean {
|
||||
const explorerSteps = steps.filter((s) => s.role === "explorer");
|
||||
if (explorerSteps.length < 2) return false;
|
||||
const e1 = explorerSteps[explorerSteps.length - 1].meta as ExplorerMeta;
|
||||
const e2 = explorerSteps[explorerSteps.length - 2].meta as ExplorerMeta;
|
||||
return e1.new_cards.length === 0 && e2.new_cards.length === 0;
|
||||
}
|
||||
|
||||
function queueAfterSkippedExplorer(steps: Steps): string[] {
|
||||
const q = lastQuestionerRemaining(steps);
|
||||
return q?.remaining_queue ?? [];
|
||||
}
|
||||
|
||||
function queueAfterExplorerStep(steps: Steps): string[] {
|
||||
const last = steps[steps.length - 1];
|
||||
if (!last || last.role !== "explorer") return [];
|
||||
const q = lastQuestionerRemaining(steps);
|
||||
if (!q) return [];
|
||||
const e = last.meta as ExplorerMeta;
|
||||
return [...q.remaining_queue, ...e.new_cards];
|
||||
}
|
||||
|
||||
export const moderator: Moderator<WorkflowMeta> = (context) => {
|
||||
const { steps } = context;
|
||||
|
||||
if (steps.length === 0) {
|
||||
return "questioner";
|
||||
}
|
||||
|
||||
const last = steps[steps.length - 1];
|
||||
|
||||
if (last.role === "questioner") {
|
||||
return "answerer";
|
||||
}
|
||||
|
||||
if (last.role === "answerer") {
|
||||
const am = last.meta as AnswererMeta;
|
||||
if (am.has_unanswered) {
|
||||
return "explorer";
|
||||
}
|
||||
const q = queueAfterSkippedExplorer(steps);
|
||||
if (q.length === 0) {
|
||||
return END;
|
||||
}
|
||||
return "questioner";
|
||||
}
|
||||
|
||||
if (last.role === "explorer") {
|
||||
if (lastTwoExplorerRunsBothEmpty(steps)) {
|
||||
return END;
|
||||
}
|
||||
const q = queueAfterExplorerStep(steps);
|
||||
if (q.length === 0) {
|
||||
return END;
|
||||
}
|
||||
return "questioner";
|
||||
}
|
||||
|
||||
return END;
|
||||
};
|
||||
102
workflows/extract-knowledge/roles/answerer.ts
Normal file
102
workflows/extract-knowledge/roles/answerer.ts
Normal file
@ -0,0 +1,102 @@
|
||||
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole, nerveCommandEnv, spawnSafe } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
import { resolveWorkdir } from "../lib/workdir.js";
|
||||
|
||||
import type { QuestionerMeta } from "./questioner.js";
|
||||
|
||||
export const answererMetaSchema = z.object({
|
||||
results: z.array(
|
||||
z.object({
|
||||
id: z.string(),
|
||||
found: z.boolean(),
|
||||
source: z.string(),
|
||||
note: z.string(),
|
||||
}),
|
||||
),
|
||||
has_unanswered: z.boolean(),
|
||||
});
|
||||
|
||||
export type AnswererMeta = z.infer<typeof answererMetaSchema>;
|
||||
|
||||
export type CreateAnswererRoleDeps = {
|
||||
extract: LlmExtractorConfig;
|
||||
};
|
||||
|
||||
function lastQuestionerMeta(messages: WorkflowMessage[]): QuestionerMeta | undefined {
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
if (messages[i].role === "questioner") {
|
||||
return messages[i].meta as QuestionerMeta;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export async function answererPrompt(ctx: ThreadContext): Promise<string> {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const cwd = resolveWorkdir(ctx.start);
|
||||
const qm = lastQuestionerMeta(messages);
|
||||
if (!qm || qm.questions.length === 0) {
|
||||
throw new Error("answerer: prompt invoked without questioner questions — wrapped role should short-circuit");
|
||||
}
|
||||
|
||||
const blocks: string[] = [];
|
||||
for (const q of qm.questions) {
|
||||
if ((ctx.start.meta as Record<string, unknown>).dryRun) {
|
||||
blocks.push(`### ${q.id}\n[dryRun] skipped nerve knowledge query\n`);
|
||||
continue;
|
||||
}
|
||||
const res = await spawnSafe(
|
||||
"nerve",
|
||||
["knowledge", "query", q.question],
|
||||
{
|
||||
cwd,
|
||||
env: nerveCommandEnv(),
|
||||
timeoutMs: 120_000,
|
||||
dryRun: false,
|
||||
abortSignal: null,
|
||||
},
|
||||
);
|
||||
if (res.ok) {
|
||||
blocks.push(`### ${q.id} (${q.domain})\nQuestion: ${q.question}\n---\n${res.value.stdout}\n`);
|
||||
} else {
|
||||
const err = res.error;
|
||||
const detail =
|
||||
err.kind === "non_zero_exit"
|
||||
? `exit ${err.exitCode}\n${err.stderr}`
|
||||
: err.kind === "timeout"
|
||||
? `timeout\n${err.stderr}`
|
||||
: err.kind === "spawn_failed"
|
||||
? err.message
|
||||
: "aborted";
|
||||
blocks.push(`### ${q.id}\nnerve knowledge query failed: ${detail}\n`);
|
||||
}
|
||||
}
|
||||
|
||||
return [
|
||||
"You are the **answerer**. You MUST NOT read repository source code — only the CLI retrieval excerpts below.",
|
||||
"For each question id, decide whether the knowledge base already answers it.",
|
||||
"Set found=true only when the excerpt supports a confident answer; otherwise found=false.",
|
||||
"Set has_unanswered=true if any question remains unanswered by the knowledge base.",
|
||||
"",
|
||||
...blocks,
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
export function createAnswererRole(adapter: AgentFn, { extract }: CreateAnswererRoleDeps): Role<AnswererMeta> {
|
||||
const inner = createRole(adapter, answererPrompt, answererMetaSchema, extract);
|
||||
|
||||
return async (ctx: ThreadContext) => {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const qm = lastQuestionerMeta(messages);
|
||||
if (!qm || qm.questions.length === 0) {
|
||||
return {
|
||||
content: "answerer: no questions from questioner; skipping CLI lookup.",
|
||||
meta: { results: [], has_unanswered: false },
|
||||
};
|
||||
}
|
||||
return inner(ctx);
|
||||
};
|
||||
}
|
||||
93
workflows/extract-knowledge/roles/explorer.ts
Normal file
93
workflows/extract-knowledge/roles/explorer.ts
Normal file
@ -0,0 +1,93 @@
|
||||
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
import { resolveWorkdir } from "../lib/workdir.js";
|
||||
|
||||
import type { AnswererMeta } from "./answerer.js";
|
||||
import type { QuestionerMeta } from "./questioner.js";
|
||||
|
||||
export const explorerMetaSchema = z.object({
|
||||
patches: z.array(
|
||||
z.object({
|
||||
card: z.string(),
|
||||
section: z.string(),
|
||||
}),
|
||||
),
|
||||
new_cards: z.array(z.string()),
|
||||
});
|
||||
|
||||
export type ExplorerMeta = z.infer<typeof explorerMetaSchema>;
|
||||
|
||||
export type CreateExplorerRoleDeps = {
|
||||
extract: LlmExtractorConfig;
|
||||
};
|
||||
|
||||
function lastMeta<M>(messages: WorkflowMessage[], role: string): M | undefined {
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
if (messages[i].role === role) {
|
||||
return messages[i].meta as M;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function explorerPrompt(ctx: ThreadContext): string {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const threadId = ctx.start.meta.threadId;
|
||||
const qm = lastMeta<QuestionerMeta>(messages, "questioner");
|
||||
const am = lastMeta<AnswererMeta>(messages, "answerer");
|
||||
const cwd = resolveWorkdir(ctx.start);
|
||||
|
||||
const unanswered =
|
||||
am?.results.filter((r) => !r.found).map((r) => r.id) ?? [];
|
||||
|
||||
return `You are the **explorer** in an extract-knowledge workflow.
|
||||
|
||||
## Context
|
||||
|
||||
- Thread: \`nerve thread ${threadId}\`
|
||||
- Working directory (repo root for paths): ${cwd}
|
||||
- Current knowledge card (questioner): ${qm?.card ?? "(unknown)"}
|
||||
|
||||
## Unanswered question ids
|
||||
|
||||
${JSON.stringify(unanswered)}
|
||||
|
||||
Use the prior answerer results in the thread to map ids to full question text when you read messages above.
|
||||
|
||||
## Task
|
||||
|
||||
For each unanswered question, **read the codebase** as needed, then either:
|
||||
|
||||
- Add a new markdown file under \`.knowledge/\`, or
|
||||
- Patch an existing card (prefer updating the card listed above when appropriate).
|
||||
|
||||
After any write or patch to \`.knowledge\`, run:
|
||||
|
||||
\`\`\`bash
|
||||
nerve knowledge sync
|
||||
\`\`\`
|
||||
|
||||
from this repo root (${cwd}), and fix failures until sync succeeds.
|
||||
|
||||
## Output meta
|
||||
|
||||
Report \`patches\` as { card, section } entries for cards you edited (section is a short heading or path hint).
|
||||
Report \`new_cards\` as repo-relative paths for brand-new files you created (e.g. \`.knowledge/new-topic.md\`).
|
||||
|
||||
Do not claim work you did not perform.`;
|
||||
}
|
||||
|
||||
export function createExplorerRole(
|
||||
adapter: AgentFn,
|
||||
{ extract }: CreateExplorerRoleDeps,
|
||||
): Role<ExplorerMeta> {
|
||||
return createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) => explorerPrompt(ctx),
|
||||
explorerMetaSchema,
|
||||
extract,
|
||||
);
|
||||
}
|
||||
108
workflows/extract-knowledge/roles/questioner.ts
Normal file
108
workflows/extract-knowledge/roles/questioner.ts
Normal file
@ -0,0 +1,108 @@
|
||||
import { readFile } from "node:fs/promises";
|
||||
import { join } from "node:path";
|
||||
|
||||
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
import { resolveQueueForQuestioner } from "../lib/knowledge-queue.js";
|
||||
import { resolveWorkdir } from "../lib/workdir.js";
|
||||
|
||||
const questionerExtractSchema = z.object({
|
||||
questions: z
|
||||
.array(
|
||||
z.object({
|
||||
id: z.string(),
|
||||
question: z.string(),
|
||||
domain: z.string(),
|
||||
}),
|
||||
)
|
||||
.length(5),
|
||||
});
|
||||
|
||||
export type QuestionerMeta = {
|
||||
/** Empty when no .knowledge cards and no work to do. */
|
||||
card: string;
|
||||
questions: { id: string; question: string; domain: string }[];
|
||||
remaining_queue: string[];
|
||||
};
|
||||
|
||||
export type CreateQuestionerRoleDeps = {
|
||||
extract: LlmExtractorConfig;
|
||||
};
|
||||
|
||||
function questionerSystem(): string {
|
||||
return `You are the **questioner** in an extract-knowledge workflow.
|
||||
|
||||
Read the given markdown knowledge card. Propose exactly **five** technical questions that are **not** already answered or covered by that card.
|
||||
|
||||
Rules:
|
||||
- Questions must be concrete and technical.
|
||||
- Each question needs a stable string id (e.g. q1, q2, q3, q4, q5), a short domain label (e.g. routing, storage), and the question text.
|
||||
- Do not assume access to other files or tools — reason only from the card content shown.`;
|
||||
}
|
||||
|
||||
function questionerUser(card: string, cardBody: string, remainingHint: string[]): string {
|
||||
return `Current card path: ${card}
|
||||
|
||||
Remaining queue after this card (paths, may be empty): ${JSON.stringify(remainingHint)}
|
||||
|
||||
--- Card content ---
|
||||
|
||||
${cardBody}`;
|
||||
}
|
||||
|
||||
export async function questionerPrompt(ctx: ThreadContext): Promise<string> {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const cwd = resolveWorkdir(ctx.start);
|
||||
const queue = await resolveQueueForQuestioner(ctx.start, messages, cwd);
|
||||
if (queue.length === 0) {
|
||||
throw new Error(
|
||||
"questioner: prompt invoked with empty queue — wrapped role should short-circuit before LLM",
|
||||
);
|
||||
}
|
||||
const card = queue[0]!;
|
||||
const remaining_queue = queue.slice(1);
|
||||
let cardBody: string;
|
||||
try {
|
||||
cardBody = await readFile(join(cwd, card), "utf8");
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
throw new Error(`questioner: failed to read ${card}: ${msg}`);
|
||||
}
|
||||
return `${questionerSystem()}\n\n${questionerUser(card, cardBody, remaining_queue)}`;
|
||||
}
|
||||
|
||||
export function createQuestionerRole(adapter: AgentFn, { extract }: CreateQuestionerRoleDeps): Role<QuestionerMeta> {
|
||||
const inner = createRole(adapter, questionerPrompt, questionerExtractSchema, extract);
|
||||
|
||||
return async (ctx: ThreadContext) => {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const cwd = resolveWorkdir(ctx.start);
|
||||
const queue = await resolveQueueForQuestioner(ctx.start, messages, cwd);
|
||||
if (queue.length === 0) {
|
||||
return {
|
||||
content:
|
||||
"questioner: no `.knowledge` markdown files found and no seed path in the trigger prompt; queue is empty.",
|
||||
meta: {
|
||||
card: "",
|
||||
questions: [],
|
||||
remaining_queue: [],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
const card = queue[0]!;
|
||||
const remaining_queue = queue.slice(1);
|
||||
const r = await inner(ctx);
|
||||
return {
|
||||
content: r.content,
|
||||
meta: {
|
||||
card,
|
||||
questions: r.meta.questions,
|
||||
remaining_queue,
|
||||
},
|
||||
};
|
||||
};
|
||||
}
|
||||
43
workflows/solve-issue/build.ts
Normal file
43
workflows/solve-issue/build.ts
Normal file
@ -0,0 +1,43 @@
|
||||
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
|
||||
import { moderator } from "./moderator.js";
|
||||
import type { WorkflowMeta } from "./moderator.js";
|
||||
import { createCommitterRole } from "./roles/committer.js";
|
||||
import { createImplementRole } from "./roles/implement.js";
|
||||
import { createPlanRole } from "./roles/plan.js";
|
||||
import { createPrepareRole } from "./roles/prepare.js";
|
||||
import { createPublishRole } from "./roles/publish.js";
|
||||
import { createReadIssueRole } from "./roles/read-issue.js";
|
||||
import { createReviewRole } from "./roles/review.js";
|
||||
import { createTestRole } from "./roles/test.js";
|
||||
|
||||
export type CreateSolveIssueDeps = {
|
||||
defaultAdapter: AgentFn;
|
||||
adapters?: Partial<Record<keyof WorkflowMeta, AgentFn>>;
|
||||
nerveRoot: string;
|
||||
extract: LlmExtractorConfig;
|
||||
};
|
||||
|
||||
export function createSolveIssueWorkflow({
|
||||
defaultAdapter,
|
||||
adapters,
|
||||
nerveRoot,
|
||||
extract,
|
||||
}: CreateSolveIssueDeps): WorkflowDefinition<WorkflowMeta> {
|
||||
const a = (role: keyof WorkflowMeta) => adapters?.[role] ?? defaultAdapter;
|
||||
return {
|
||||
name: "solve-issue",
|
||||
roles: {
|
||||
"read-issue": createReadIssueRole(a("read-issue"), extract),
|
||||
prepare: createPrepareRole(a("prepare"), extract),
|
||||
plan: createPlanRole(a("plan"), { extract, nerveRoot }),
|
||||
implement: createImplementRole(a("implement"), { extract, nerveRoot }),
|
||||
committer: createCommitterRole(a("committer"), extract),
|
||||
review: createReviewRole(a("review"), extract, nerveRoot),
|
||||
test: createTestRole(a("test"), extract),
|
||||
publish: createPublishRole(a("publish"), { extract, nerveRoot }),
|
||||
},
|
||||
moderator,
|
||||
};
|
||||
}
|
||||
37
workflows/solve-issue/index.ts
Normal file
37
workflows/solve-issue/index.ts
Normal file
@ -0,0 +1,37 @@
|
||||
import { join } from "node:path";
|
||||
import { createCursorAdapter } from "@uncaged/nerve-adapter-cursor";
|
||||
import { hermesAdapter } from "@uncaged/nerve-adapter-hermes";
|
||||
import { createSolveIssueWorkflow } from "./build.js";
|
||||
import { resolveDashScopeProvider } from "./lib/provider.js";
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
const NERVE_ROOT = join(HOME, ".uncaged-nerve");
|
||||
|
||||
const provider = await resolveDashScopeProvider(NERVE_ROOT);
|
||||
|
||||
if (provider === null) {
|
||||
throw new Error("Set DASHSCOPE_API_KEY and DASHSCOPE_BASE_URL (or cfg get equivalents)");
|
||||
}
|
||||
|
||||
const CURSOR_TIMEOUT_MS = 300_000;
|
||||
|
||||
const workflow = createSolveIssueWorkflow({
|
||||
defaultAdapter: hermesAdapter,
|
||||
adapters: {
|
||||
plan: createCursorAdapter({
|
||||
type: "cursor",
|
||||
mode: "ask",
|
||||
model: "auto",
|
||||
timeout: CURSOR_TIMEOUT_MS,
|
||||
}),
|
||||
implement: createCursorAdapter({
|
||||
type: "cursor",
|
||||
model: "auto",
|
||||
timeout: CURSOR_TIMEOUT_MS,
|
||||
}),
|
||||
},
|
||||
nerveRoot: NERVE_ROOT,
|
||||
extract: { provider },
|
||||
});
|
||||
|
||||
export default workflow;
|
||||
26
workflows/solve-issue/lib/provider.ts
Normal file
26
workflows/solve-issue/lib/provider.ts
Normal file
@ -0,0 +1,26 @@
|
||||
import type { LlmProvider } from "@uncaged/nerve-workflow-utils";
|
||||
import { spawnSafe } from "@uncaged/nerve-workflow-utils";
|
||||
|
||||
export async function cfgGet(nerveRoot: string, key: string): Promise<string | null> {
|
||||
const result = await spawnSafe("cfg", ["get", key], {
|
||||
cwd: nerveRoot,
|
||||
env: null,
|
||||
timeoutMs: 10_000,
|
||||
abortSignal: null,
|
||||
});
|
||||
if (!result.ok) {
|
||||
return null;
|
||||
}
|
||||
const value = result.value.stdout.trim();
|
||||
return value.length > 0 ? value : null;
|
||||
}
|
||||
|
||||
export async function resolveDashScopeProvider(nerveRoot: string): Promise<LlmProvider | null> {
|
||||
const apiKey = process.env.DASHSCOPE_API_KEY ?? (await cfgGet(nerveRoot, "DASHSCOPE_API_KEY"));
|
||||
const baseUrl = process.env.DASHSCOPE_BASE_URL ?? (await cfgGet(nerveRoot, "DASHSCOPE_BASE_URL"));
|
||||
const model = process.env.DASHSCOPE_MODEL ?? (await cfgGet(nerveRoot, "DASHSCOPE_MODEL")) ?? "qwen-plus";
|
||||
if (!apiKey || !baseUrl) {
|
||||
return null;
|
||||
}
|
||||
return { apiKey, baseUrl, model };
|
||||
}
|
||||
86
workflows/solve-issue/lib/repo-context.ts
Normal file
86
workflows/solve-issue/lib/repo-context.ts
Normal file
@ -0,0 +1,86 @@
|
||||
import { join } from "node:path";
|
||||
import type { RoleStep, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
|
||||
type SolveIssueParse = {
|
||||
host: string;
|
||||
owner: string;
|
||||
repo: string;
|
||||
number: number;
|
||||
};
|
||||
|
||||
type SolveIssueRepo = {
|
||||
path: string;
|
||||
defaultBranch: string;
|
||||
packageManager: string;
|
||||
};
|
||||
|
||||
const HOME = process.env.HOME ?? "/home/azureuser";
|
||||
|
||||
function extractMarkedSection(text: string, marker: string): Record<string, string> | null {
|
||||
const escaped = marker.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
||||
const re = new RegExp(`---${escaped}---\\s*([\\s\\S]*?)(?:\\n---|$)`);
|
||||
const m = text.match(re);
|
||||
if (m === null) {
|
||||
return null;
|
||||
}
|
||||
const rec: Record<string, string> = {};
|
||||
for (const line of m[1].split("\n")) {
|
||||
const kv = line.match(/^([a-zA-Z]+):\s*(.+)$/);
|
||||
if (kv !== null) {
|
||||
rec[kv[1]] = kv[2].trim();
|
||||
}
|
||||
}
|
||||
return Object.keys(rec).length > 0 ? rec : null;
|
||||
}
|
||||
|
||||
function parseSolveIssueParse(text: string): SolveIssueParse | null {
|
||||
const rec = extractMarkedSection(text, "SOLVE_ISSUE_PARSE");
|
||||
if (rec === null) {
|
||||
return null;
|
||||
}
|
||||
const host = rec.host ?? "";
|
||||
const owner = rec.owner ?? "";
|
||||
const repo = rec.repo ?? "";
|
||||
const num = Number(rec.number ?? "");
|
||||
if (host.length === 0 || owner.length === 0 || repo.length === 0 || !Number.isFinite(num) || num <= 0) {
|
||||
return null;
|
||||
}
|
||||
return { host, owner, repo, number: num };
|
||||
}
|
||||
|
||||
function parseSolveIssueRepo(text: string): SolveIssueRepo | null {
|
||||
const rec = extractMarkedSection(text, "SOLVE_ISSUE_REPO");
|
||||
if (rec === null) {
|
||||
return null;
|
||||
}
|
||||
const path = rec.path ?? "";
|
||||
if (path.length === 0) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
path,
|
||||
defaultBranch: rec.defaultBranch ?? "main",
|
||||
packageManager: rec.packageManager ?? "pnpm",
|
||||
};
|
||||
}
|
||||
|
||||
/** Prefer explicit prepare marker; else ~/Code/<owner>/<repo> from read-issue parse block. */
|
||||
export function resolveRepoCwd(messages: WorkflowMessage[]): string | null {
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
if (messages[i].role === "prepare") {
|
||||
const repo = parseSolveIssueRepo(messages[i].content);
|
||||
if (repo !== null) {
|
||||
return repo.path;
|
||||
}
|
||||
}
|
||||
}
|
||||
for (let i = messages.length - 1; i >= 0; i--) {
|
||||
if (messages[i].role === "read-issue") {
|
||||
const parsed = parseSolveIssueParse(messages[i].content);
|
||||
if (parsed !== null) {
|
||||
return join(HOME, "Code", parsed.owner, parsed.repo);
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
99
workflows/solve-issue/moderator.ts
Normal file
99
workflows/solve-issue/moderator.ts
Normal file
@ -0,0 +1,99 @@
|
||||
import { END } from "@uncaged/nerve-core";
|
||||
import type { Moderator } from "@uncaged/nerve-core";
|
||||
import type { ReadIssueMeta } from "./roles/read-issue.js";
|
||||
import type { PrepareMeta } from "./roles/prepare.js";
|
||||
import type { PlanMeta } from "./roles/plan.js";
|
||||
import type { ImplementMeta } from "./roles/implement.js";
|
||||
import type { CommitterMeta } from "./roles/committer.js";
|
||||
import type { ReviewMeta } from "./roles/review.js";
|
||||
import type { TestMeta } from "./roles/test.js";
|
||||
import type { PublishMeta } from "./roles/publish.js";
|
||||
|
||||
/**
 * Role-name → meta-payload map for the solve-issue workflow.
 * Keys match the role names the moderator below returns; values are the
 * zod-inferred meta types each role emits in its closing JSON block.
 */
export type WorkflowMeta = {
  "read-issue": ReadIssueMeta;
  prepare: PrepareMeta;
  plan: PlanMeta;
  implement: ImplementMeta;
  committer: CommitterMeta;
  review: ReviewMeta;
  test: TestMeta;
  publish: PublishMeta;
};
|
||||
|
||||
const MAX_IMPLEMENT_ROUNDS = 20;
|
||||
const MAX_TOTAL_REJECTIONS = 10;
|
||||
|
||||
function implementRounds(steps: { role: string }[]): number {
|
||||
return steps.filter((s) => s.role === "implement").length;
|
||||
}
|
||||
|
||||
function totalRejections(steps: { role: string; meta: unknown }[]): number {
|
||||
return steps.filter((s) => {
|
||||
if (s.role === "review") return !(s.meta as Record<string, boolean>).approved;
|
||||
if (s.role === "test") return !(s.meta as Record<string, boolean>).passed;
|
||||
if (s.role === "committer") return !(s.meta as Record<string, boolean>).committed;
|
||||
if (s.role === "publish") return !(s.meta as Record<string, boolean>).success;
|
||||
return false;
|
||||
}).length;
|
||||
}
|
||||
|
||||
function canRetryImplement(steps: { role: string; meta: unknown }[]): boolean {
|
||||
return implementRounds(steps) < MAX_IMPLEMENT_ROUNDS && totalRejections(steps) < MAX_TOTAL_REJECTIONS;
|
||||
}
|
||||
|
||||
export const moderator: Moderator<WorkflowMeta> = (context) => {
|
||||
if (context.steps.length === 0) {
|
||||
return "read-issue";
|
||||
}
|
||||
|
||||
const last = context.steps[context.steps.length - 1];
|
||||
|
||||
if (last.role === "read-issue") {
|
||||
return last.meta.ready ? "prepare" : END;
|
||||
}
|
||||
|
||||
if (last.role === "prepare") {
|
||||
return last.meta.ready ? "plan" : END;
|
||||
}
|
||||
|
||||
if (last.role === "plan") {
|
||||
return last.meta.ready ? "implement" : END;
|
||||
}
|
||||
|
||||
if (last.role === "implement") {
|
||||
if (last.meta.done) {
|
||||
return "committer";
|
||||
}
|
||||
return canRetryImplement(context.steps) ? "implement" : END;
|
||||
}
|
||||
|
||||
if (last.role === "committer") {
|
||||
if (last.meta.committed) {
|
||||
return "review";
|
||||
}
|
||||
return canRetryImplement(context.steps) ? "implement" : END;
|
||||
}
|
||||
|
||||
if (last.role === "review") {
|
||||
if (last.meta.approved) {
|
||||
return "test";
|
||||
}
|
||||
return canRetryImplement(context.steps) ? "implement" : END;
|
||||
}
|
||||
|
||||
if (last.role === "test") {
|
||||
if (last.meta.passed) {
|
||||
return "publish";
|
||||
}
|
||||
return canRetryImplement(context.steps) ? "implement" : END;
|
||||
}
|
||||
|
||||
if (last.role === "publish") {
|
||||
if (last.meta.success) {
|
||||
return END;
|
||||
}
|
||||
return canRetryImplement(context.steps) ? "implement" : END;
|
||||
}
|
||||
|
||||
return END;
|
||||
};
|
||||
57
workflows/solve-issue/roles/committer.ts
Normal file
57
workflows/solve-issue/roles/committer.ts
Normal file
@ -0,0 +1,57 @@
|
||||
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole, decorateRole, withDryRun, onFail } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
 * Builds the prompt for the committer agent, which branches, commits, and
 * pushes the working-tree changes left behind by the implement step.
 * @param threadId - Workflow thread id embedded in the `nerve thread show` command.
 */
function committerPrompt({ threadId }: { threadId: string }): string {
  return `You are the committer agent. The **implement** step finished with a passing build; your job is to branch, commit, and push.

1. Read the workflow thread: \`nerve thread show ${threadId}\` — understand what was planned, implemented, and reviewed.
2. In the thread, locate \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\`. From them you need issue **number**, **title** (for the branch slug), repo **path**, and **defaultBranch**.
3. \`cd\` to the repo **path** from the markers. Optionally read \`CONVENTIONS.md\` in that repo root if present.
4. Run \`git rev-parse --abbrev-ref HEAD\` and compare with **defaultBranch** from the markers. Implement leaves changes uncommitted on the default branch — you should be on that branch with a dirty working tree. If you are not on the default branch, or the tree is clean when you expected changes, set **committed** to false and explain.
5. Run \`git status\`. If there is nothing to commit, set **committed** to false and explain.
6. Create a feature branch (do not commit directly on the default branch if it would mix unrelated work):
- Name: \`fix/<number>-<short-slug>\` for fixes, or \`feat/<number>-<short-slug>\` if the issue is clearly a feature.
- **slug**: lowercase, hyphens only, short (from issue title words).
- Example: \`git checkout -b fix/42-auth-timeout\`
7. \`git add -A\`
8. Write a **conventional commit** message describing what changed and why, using the thread context.
9. \`git commit -m "<message>"\` — do NOT pass \`--author\`, use repo git config.
10. \`git push -u origin <branch-name>\`

**committed=true** only if branch was created, commit succeeded, and **push** succeeded.

End your reply with a JSON line:
\`\`\`json
{ "committed": true }
\`\`\`
or
\`\`\`json
{ "committed": false }
\`\`\``;
}

/** Meta contract extracted from the committer agent's final JSON line. */
export const committerMetaSchema = z.object({
  committed: z
    .boolean()
    .describe("true if branch created, changes committed, and pushed successfully"),
});
// Inferred TS type of the committer step's meta payload.
export type CommitterMeta = z.infer<typeof committerMetaSchema>;
|
||||
|
||||
export function createCommitterRole(
|
||||
adapter: AgentFn,
|
||||
extract: LlmExtractorConfig,
|
||||
): Role<CommitterMeta> {
|
||||
const inner = createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) => committerPrompt({ threadId: ctx.start.meta.threadId }),
|
||||
committerMetaSchema,
|
||||
extract,
|
||||
);
|
||||
|
||||
return decorateRole(inner, [
|
||||
withDryRun({ label: "committer", meta: { committed: true } as CommitterMeta }),
|
||||
onFail({ label: "committer", meta: { committed: false } as CommitterMeta }),
|
||||
]) as Role<CommitterMeta>;
|
||||
}
|
||||
86
workflows/solve-issue/roles/implement.ts
Normal file
86
workflows/solve-issue/roles/implement.ts
Normal file
@ -0,0 +1,86 @@
|
||||
import type { AgentFn, Role, RoleResult, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
import { resolveRepoCwd } from "../lib/repo-context.js";
|
||||
|
||||
/**
 * Builds the prompt for the implement agent, which edits code in the target
 * repo until the change is complete and the build passes.
 * @param threadId - Workflow thread id for `nerve thread show`.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function buildImplementPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are the **implement** agent. You apply code changes for the issue.

Read workflow context (plan, reviewer/test feedback): \`nerve thread show ${threadId}\`

Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`

Your cwd is the target repository.

## Requirements

1. Implement the planned changes; address reviewer/tester feedback from the thread if any.
2. Run the project **build** (\`pnpm build\`, \`npm run build\`, etc.) and fix issues until build passes.
3. Multi-step: if you cannot finish this round, explain why and set **done** to false.

Do **not** run \`git checkout -b\`, \`git add\`, \`git commit\`, or \`git push\`. **Never** create commits on any branch — branching and commits are handled by the **committer** step after you finish.

Then close with JSON:
\`\`\`json
{ "done": true }
\`\`\`
or \`{ "done": false }\` matching whether implementation is complete.

**done=true** only when changes are complete **and** build passes in this round.`;
}

/** Meta contract extracted from the implement agent's closing JSON. */
export const implementMetaSchema = z.object({
  done: z.boolean().describe("true when changes are complete and build passes this round"),
});
// Inferred TS type of the implement step's meta payload.
export type ImplementMeta = z.infer<typeof implementMetaSchema>;

/** Dependencies for createImplementRole. */
export type CreateImplementRoleDeps = {
  extract: LlmExtractorConfig; // LLM extractor used to pull JSON meta from replies
  nerveRoot: string; // workspace root, interpolated into the prompt
};
|
||||
|
||||
export function createImplementRole(
|
||||
adapter: AgentFn,
|
||||
{ extract, nerveRoot }: CreateImplementRoleDeps,
|
||||
): Role<ImplementMeta> {
|
||||
return async (ctx: ThreadContext): Promise<RoleResult<ImplementMeta>> => {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const cwd = resolveRepoCwd(messages);
|
||||
if (cwd === null) {
|
||||
return {
|
||||
content: "implement cannot run: missing repo path in thread markers",
|
||||
meta: { done: false },
|
||||
};
|
||||
}
|
||||
|
||||
const innerRole = createRole(
|
||||
adapter,
|
||||
async (innerCtx: ThreadContext) =>
|
||||
buildImplementPrompt({
|
||||
threadId: innerCtx.start.meta.threadId,
|
||||
nerveRoot,
|
||||
}),
|
||||
implementMetaSchema,
|
||||
extract,
|
||||
);
|
||||
|
||||
const innerCtx: ThreadContext = {
|
||||
...ctx,
|
||||
start: {
|
||||
...ctx.start,
|
||||
meta: { ...ctx.start.meta, workdir: cwd },
|
||||
},
|
||||
};
|
||||
try {
|
||||
return await innerRole(innerCtx);
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
return {
|
||||
content: `implement failed: ${msg}`,
|
||||
meta: { done: false },
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
88
workflows/solve-issue/roles/plan.ts
Normal file
88
workflows/solve-issue/roles/plan.ts
Normal file
@ -0,0 +1,88 @@
|
||||
import type { AgentFn, Role, RoleResult, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
import { resolveRepoCwd } from "../lib/repo-context.js";
|
||||
|
||||
/**
 * Builds the prompt for the plan agent (analysis only — ask mode), which
 * produces the implementation plan that implement/test later follow.
 * @param threadId - Workflow thread id for `nerve thread show`.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function buildPlanPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are the **plan** agent (analysis only — ask mode). You produce an implementation plan for fixing the issue.

Read workflow context: \`nerve thread show ${threadId}\`

Read Nerve workspace conventions (coding rules for agents): \`cat ${nerveRoot}/CONVENTIONS.md\`

In the **target repository** (your cwd), skim relevant files and read \`CONVENTIONS.md\` **if it exists** there.

## Output

Write an implementation plan in **markdown** with:

1. Problem understanding
2. Change strategy
3. Target files (paths)
4. **Test commands** to run (explicit shell commands, e.g. \`pnpm test\`, \`pnpm vitest run\`)
5. Risks

End your reply with a JSON code block (meta signal):
\`\`\`json
{ "ready": true }
\`\`\`
Use \`{ "ready": false }\` if the plan cannot be made actionable.

**ready=true** only when the plan is clear and actionable.`;
}

/** Meta contract extracted from the plan agent's closing JSON. */
export const planMetaSchema = z.object({
  ready: z.boolean().describe("true if plan is clear and actionable"),
});
// Inferred TS type of the plan step's meta payload.
export type PlanMeta = z.infer<typeof planMetaSchema>;

/** Dependencies for createPlanRole. */
export type CreatePlanRoleDeps = {
  extract: LlmExtractorConfig; // LLM extractor used to pull JSON meta from replies
  nerveRoot: string; // workspace root, interpolated into the prompt
};
|
||||
|
||||
export function createPlanRole(
|
||||
adapter: AgentFn,
|
||||
{ extract, nerveRoot }: CreatePlanRoleDeps,
|
||||
): Role<PlanMeta> {
|
||||
return async (ctx: ThreadContext): Promise<RoleResult<PlanMeta>> => {
|
||||
const messages = ctx.steps as unknown as WorkflowMessage[];
|
||||
const cwd = resolveRepoCwd(messages);
|
||||
if (cwd === null) {
|
||||
return {
|
||||
content: "plan cannot run: missing ---SOLVE_ISSUE_REPO--- or ---SOLVE_ISSUE_PARSE--- in thread",
|
||||
meta: { ready: false },
|
||||
};
|
||||
}
|
||||
|
||||
const innerRole = createRole(
|
||||
adapter,
|
||||
async (innerCtx: ThreadContext) =>
|
||||
buildPlanPrompt({
|
||||
threadId: innerCtx.start.meta.threadId,
|
||||
nerveRoot,
|
||||
}),
|
||||
planMetaSchema,
|
||||
extract,
|
||||
);
|
||||
|
||||
const innerCtx: ThreadContext = {
|
||||
...ctx,
|
||||
start: {
|
||||
...ctx.start,
|
||||
meta: { ...ctx.start.meta, workdir: cwd },
|
||||
},
|
||||
};
|
||||
try {
|
||||
return await innerRole(innerCtx);
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
return {
|
||||
content: `plan failed: ${msg}`,
|
||||
meta: { ready: false },
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
73
workflows/solve-issue/roles/prepare.ts
Normal file
73
workflows/solve-issue/roles/prepare.ts
Normal file
@ -0,0 +1,73 @@
|
||||
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
 * Builds the prompt for the prepare agent, which clones/updates the target
 * repo, installs dependencies, verifies the baseline build, and emits the
 * ---SOLVE_ISSUE_REPO--- marker block consumed by later steps.
 * @param threadId - Workflow thread id for `nerve thread show`.
 */
function preparePrompt({ threadId }: { threadId: string }): string {
  return `You are the **prepare** agent. You ensure the target repository is ready for work.

Read prior messages / thread for issue markers: \`nerve thread show ${threadId}\`

## Goal

Find **owner**, **repo**, and **host** from \`---SOLVE_ISSUE_PARSE---\` in the thread (from read-issue).

Check the **initial user prompt** (the trigger message) for a local repo path. The user may specify it like:
- \`--repo /path/to/repo\`
- \`repo: /path/to/repo\`
- or just mention an absolute path to the local clone

## Steps

### If a local path is provided in the trigger prompt:
1. Verify \`<path>/.git\` exists — if not, fail with \`ready: false\`
2. \`cd "<path>" && git fetch --all\`
3. Ensure working tree clean: if \`git status --porcelain\` is non-empty, \`git stash push -u -m "solve-issue stash"\`
4. Detect default branch (\`main\` or \`master\`) and \`git checkout <default> && git pull --ff-only\`
5. Use this path as REPOPATH

### If no local path is provided:
1. Let \`REPOPATH=$HOME/Code/<owner>/<repo>\` (expand \`$HOME\`)
2. \`mkdir -p "$HOME/Code/<owner>"\`
3. If \`REPOPATH/.git\` is missing: \`git clone https://<host>/<owner>/<repo>.git "$REPOPATH"\`
Else: \`cd "$REPOPATH" && git fetch --all && git pull --ff-only\`
4. Ensure working tree clean: if \`git status --porcelain\` is non-empty, \`git stash push -u -m "solve-issue stash"\`
5. Detect default branch and \`git checkout <default>\`

### Then (both paths):
6. Detect package manager: \`pnpm-lock.yaml\` → pnpm, \`yarn.lock\` → yarn, \`package-lock.json\` → npm; run install (\`pnpm install --no-frozen-lockfile\` / \`npm ci\` or \`npm install\` / \`yarn\`).
7. If \`package.json\` has a \`build\` script, run the build (\`pnpm build\`, etc.) and fix nothing — only verify baseline passes.

## Required marker block

Emit **exactly**:
\`\`\`
---SOLVE_ISSUE_REPO---
path: <absolute path to REPOPATH>
defaultBranch: <main or master>
packageManager: <pnpm|npm|yarn>
---
\`\`\`

End with:
\`\`\`json
{ "ready": true }
\`\`\`
or \`{ "ready": false }\` if the repo is invalid, or install/build baseline failed.

**ready=true** only when the repo exists at \`path\`, is clean, dependencies installed, and baseline build succeeded (or no build script).`;
}

/** Meta contract extracted from the prepare agent's closing JSON. */
export const prepareMetaSchema = z.object({
  ready: z.boolean().describe("true if repo is ready and baseline build ok"),
});
// Inferred TS type of the prepare step's meta payload.
export type PrepareMeta = z.infer<typeof prepareMetaSchema>;
|
||||
|
||||
export function createPrepareRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<PrepareMeta> {
|
||||
return createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) => preparePrompt({ threadId: ctx.start.meta.threadId }),
|
||||
prepareMetaSchema,
|
||||
extract,
|
||||
);
|
||||
}
|
||||
110
workflows/solve-issue/roles/publish.ts
Normal file
110
workflows/solve-issue/roles/publish.ts
Normal file
@ -0,0 +1,110 @@
|
||||
import { mkdirSync, writeFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import type { AgentFn, Role, RoleResult, ThreadContext } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole, isDryRun } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
 * Builds the prompt for the publish agent, which opens a pull request for
 * the pushed branch using the `tea` CLI after tests pass.
 * @param threadId - Workflow thread id for `nerve thread show`.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function buildPublishPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are the **publish** agent (Hermes). Test has passed. Open a pull request for the current branch using the **tea** CLI.

## Context

- Read the full workflow thread: \`nerve thread show ${threadId}\`
- Nerve workspace conventions (for tone/consistency, optional): \`cat ${nerveRoot}/CONVENTIONS.md\`

## Repo and issue (from the thread)

Find \`---SOLVE_ISSUE_PARSE---\` and \`---SOLVE_ISSUE_REPO---\` in prior messages. You need:
- \`path\` — clone checkout directory (this is your working copy)
- \`host\`, \`owner\`, \`repo\`, \`number\` for the issue
- \`defaultBranch\` (for PR base) from SOLVE_ISSUE_REPO

**Issue link** for the Ref section: \`https://<host>/<owner>/<repo>/issues/<number>\`

## Steps (in order)

1. \`cd\` to the **repo \`path\`**. Run \`git rev-parse --abbrev-ref HEAD\` to get the current branch name. The **committer** step should already have pushed this branch; run \`git push -u origin <that-branch>\` only if the branch is not yet on the remote.
2. Choose a **PR title** that reflects the real change (not a generic \`fix: issue #N\`): derive it from the issue title, plan, and thread summary (keep it concise; Conventional Commits style is fine, e.g. \`fix(auth): handle session expiry\`).
3. Write a **PR body** in Markdown with exactly these sections, in this order, each with a \`##\` heading (fill with concise content based on the thread: plan, implement, review, test):
- **## What** — one short paragraph: what this PR does
- **## Why** — one short paragraph: motivation / issue
- **## Changes** — bullet list of notable changes
- **## Ref** — include one line \`Fixes #<number>\` (same \`number\` from SOLVE_ISSUE_PARSE; closes/links the issue where supported) **and** the issue URL \`https://<host>/<owner>/<repo>/issues/<number>\`
4. Create the PR with **tea** (not curl/fetch to Gitea):
- \`tea pr create --repo <owner>/<repo> --base <defaultBranch> --head <branch> --title "<your meaningful title>" --body <your markdown body>\`
- You may use a heredoc or a temp file for \`--body\` if the shell requires it; keep the four sections in the body.
5. Confirm the PR was created (tea prints a URL or PR number in typical setups).

**success=true** only if both **push** and **tea** PR creation succeed. If any step fails, set **success=false** and say why.

End your reply with a JSON line:
\`\`\`json
{ "success": true }
\`\`\`
or
\`\`\`json
{ "success": false }
\`\`\``;
}

/** Meta contract extracted from the publish agent's final JSON line. */
export const publishMetaSchema = z.object({
  success: z.boolean().describe("true if git push and tea pr create both succeeded"),
});
// Inferred TS type of the publish step's meta payload.
export type PublishMeta = z.infer<typeof publishMetaSchema>;

/** Dependencies for createPublishRole. */
export type CreatePublishRoleDeps = {
  extract: LlmExtractorConfig; // LLM extractor used to pull JSON meta from replies
  nerveRoot: string; // workspace root, used for the prompt and the failure log dir
};
|
||||
|
||||
function logPath(nerveRoot: string): string {
|
||||
return join(nerveRoot, "logs", `solve-issue-publish-${Date.now()}.log`);
|
||||
}
|
||||
|
||||
export function createPublishRole(
|
||||
adapter: AgentFn,
|
||||
{ extract, nerveRoot }: CreatePublishRoleDeps,
|
||||
): Role<PublishMeta> {
|
||||
const innerRole = createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) =>
|
||||
buildPublishPrompt({ threadId: ctx.start.meta.threadId, nerveRoot }),
|
||||
publishMetaSchema,
|
||||
extract,
|
||||
);
|
||||
|
||||
return async (ctx: ThreadContext): Promise<RoleResult<PublishMeta>> => {
|
||||
const file = logPath(nerveRoot);
|
||||
mkdirSync(join(file, ".."), { recursive: true });
|
||||
|
||||
if (isDryRun(ctx.start)) {
|
||||
const msg = "[dry-run] publish skipped (no git push / PR)";
|
||||
writeFileSync(file, `${msg}\n`, "utf-8");
|
||||
return {
|
||||
content: `[dry-run] publish skipped — log: ${file}`,
|
||||
meta: { success: true },
|
||||
};
|
||||
}
|
||||
|
||||
const innerCtx: ThreadContext = {
|
||||
...ctx,
|
||||
start: {
|
||||
...ctx.start,
|
||||
meta: { ...ctx.start.meta, workdir: nerveRoot },
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
return await innerRole(innerCtx);
|
||||
} catch (e) {
|
||||
const msg = e instanceof Error ? e.message : String(e);
|
||||
const body = `publish failed: ${msg}\n`;
|
||||
writeFileSync(file, body, "utf-8");
|
||||
return {
|
||||
content: `publish failed: ${msg}\nLog: ${file}`,
|
||||
meta: { success: false },
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
53
workflows/solve-issue/roles/read-issue.ts
Normal file
53
workflows/solve-issue/roles/read-issue.ts
Normal file
@ -0,0 +1,53 @@
|
||||
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
 * Builds the prompt for the read-issue agent, which fetches the Gitea issue
 * via the `tea` CLI and emits the ---SOLVE_ISSUE_PARSE--- marker block
 * consumed by later steps.
 * @param threadId - Workflow thread id for `nerve thread show`.
 */
function readIssuePrompt({ threadId }: { threadId: string }): string {
  return `You are the **read-issue** agent. You fetch Gitea issue content via the \`tea\` CLI.

Read the workflow thread start prompt for the issue URL (same run): \`nerve thread show ${threadId}\`

## Steps

1. From the **initial user prompt** (issue URL), extract **host**, **owner**, **repo**, and **issue number**. Supported shape:
\`https://<host>/<owner>/<repo>/issues/<number>\`

2. Run:
\`tea issue show <number> --repo <owner>/<repo> --comments\`
(Add \`--json\` if helpful for parsing.)

3. In your reply, include **structured issue text**: title, body, labels, and each comment (author + body + time).

4. You **must** emit this marker block **exactly** (fill in real values):
\`\`\`
---SOLVE_ISSUE_PARSE---
host: <host>
owner: <owner>
repo: <repo>
number: <number>
---
\`\`\`

5. End with JSON meta (verbatim block):
\`\`\`json
{ "ready": true }
\`\`\`
Use \`{ "ready": false }\` if you could not fetch or parse the issue.

**ready=true** only if the issue was fetched successfully and the marker block is correct.`;
}

/** Meta contract extracted from the read-issue agent's closing JSON. */
export const readIssueMetaSchema = z.object({
  ready: z.boolean().describe("true if issue content was fetched and markers are present"),
});
// Inferred TS type of the read-issue step's meta payload.
export type ReadIssueMeta = z.infer<typeof readIssueMetaSchema>;
|
||||
|
||||
export function createReadIssueRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<ReadIssueMeta> {
|
||||
return createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) => readIssuePrompt({ threadId: ctx.start.meta.threadId }),
|
||||
readIssueMetaSchema,
|
||||
extract,
|
||||
);
|
||||
}
|
||||
59
workflows/solve-issue/roles/review.ts
Normal file
59
workflows/solve-issue/roles/review.ts
Normal file
@ -0,0 +1,59 @@
|
||||
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
 * Builds the prompt for the reviewer agent, which inspects the uncommitted
 * diff between implement and test and approves or rejects it.
 * @param threadId - Workflow thread id for `nerve thread show`.
 * @param nerveRoot - Nerve workspace root containing CONVENTIONS.md.
 */
function reviewPrompt({ threadId, nerveRoot }: { threadId: string; nerveRoot: string }): string {
  return `You are a **code reviewer** (Hermes). You run after implement and before test.

Read Nerve workspace conventions: \`cat ${nerveRoot}/CONVENTIONS.md\`

Read workflow context: \`nerve thread show ${threadId}\`

Find **repo path** from \`---SOLVE_ISSUE_REPO--- path:\` in the thread (prepare step). \`cd\` there before any git commands.

## Static analysis

Run:

1. \`cd <repo-path> && git diff --stat\`
2. \`cd <repo-path> && git diff\`
3. \`cd <repo-path> && git status --short\`

## Checklist

Reject (**approved: false**) if you find:

- Garbage files, secrets/credentials, unrelated changes
- Violations of CONVENTIONS.md (e.g. \`interface\` vs \`type\`, dynamic \`import()\`)

Approve (**approved: true**) if the diff is clean and focused.

End with:
\`\`\`json
{ "approved": true }
\`\`\`
or
\`\`\`json
{ "approved": false }
\`\`\``;
}

/** Meta contract extracted from the reviewer agent's closing JSON. */
export const reviewMetaSchema = z.object({
  approved: z.boolean().describe("true if diff is clean and ready for tests"),
});
// Inferred TS type of the review step's meta payload.
export type ReviewMeta = z.infer<typeof reviewMetaSchema>;
|
||||
|
||||
export function createReviewRole(
|
||||
adapter: AgentFn,
|
||||
extract: LlmExtractorConfig,
|
||||
nerveRoot: string,
|
||||
): Role<ReviewMeta> {
|
||||
return createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) =>
|
||||
reviewPrompt({ threadId: ctx.start.meta.threadId, nerveRoot }),
|
||||
reviewMetaSchema,
|
||||
extract,
|
||||
);
|
||||
}
|
||||
40
workflows/solve-issue/roles/test.ts
Normal file
40
workflows/solve-issue/roles/test.ts
Normal file
@ -0,0 +1,40 @@
|
||||
import type { AgentFn, Role, ThreadContext } from "@uncaged/nerve-core";
|
||||
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
|
||||
import { createRole } from "@uncaged/nerve-workflow-utils";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
 * Builds the prompt for the test agent, which runs the plan's test commands
 * (or a pnpm/npm fallback) in the repo checkout and reports pass/fail.
 * @param threadId - Workflow thread id for `nerve thread show`.
 */
function testPrompt({ threadId }: { threadId: string }): string {
  return `You are the **test** agent (Hermes). You execute automated tests for the change.

Read workflow context: \`nerve thread show ${threadId}\`

Find **repo path** from \`---SOLVE_ISSUE_REPO--- path:\` in the thread.

From the **plan** step output, locate **Test commands** (explicit shell commands). Run each command with cwd = repo path, in order.

If the plan lists **no** test commands, try **pnpm test**, then **npm test** if pnpm is unavailable; if neither applies, explain skip.

Collect stdout/stderr snippets on failure.

End with JSON only:
\`\`\`json
{ "passed": true }
\`\`\`
or \`{ "passed": false }\`

**passed=true** only if every executed command exited 0 (or skip was justified with no failing command).`;
}

/** Meta contract extracted from the test agent's closing JSON. */
export const testMetaSchema = z.object({
  passed: z.boolean().describe("true if all test commands passed"),
});
// Inferred TS type of the test step's meta payload.
export type TestMeta = z.infer<typeof testMetaSchema>;
|
||||
|
||||
export function createTestRole(adapter: AgentFn, extract: LlmExtractorConfig): Role<TestMeta> {
|
||||
return createRole(
|
||||
adapter,
|
||||
async (ctx: ThreadContext) => testPrompt({ threadId: ctx.start.meta.threadId }),
|
||||
testMetaSchema,
|
||||
extract,
|
||||
);
|
||||
}
|
||||
Loading…
x
Reference in New Issue
Block a user