refactor(knowledge-extraction): convert questioner and answerer to createRole four-tuple

- questioner: createRole(adapter, questionerPrompt, schema, extract) + queue short-circuit + meta post-processing
- answerer: createRole(adapter, answererPrompt, schema, extract) + empty-questions short-circuit
- build.ts: use createLlmAdapter(extract.provider) as default LLM adapter for questioner/answerer

Refs uncaged/nerve#277
This commit is contained in:
小橘 2026-04-30 12:38:58 +00:00
parent 1da41c7f08
commit 7432f80d61
3 changed files with 85 additions and 85 deletions

View File

@ -1,5 +1,6 @@
import type { AgentFn, WorkflowDefinition } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createLlmAdapter } from "@uncaged/nerve-workflow-utils";
import { moderator } from "./moderator.js";
import type { WorkflowMeta } from "./moderator.js";
@ -19,11 +20,12 @@ export function createKnowledgeExtractionWorkflow({
extract,
}: CreateKnowledgeExtractionDeps): WorkflowDefinition<WorkflowMeta> {
const a = (role: keyof WorkflowMeta) => adapters?.[role] ?? defaultAdapter;
const llmAdapter = createLlmAdapter(extract.provider);
return {
name: "knowledge-extraction",
roles: {
questioner: createQuestionerRole({ extract }),
answerer: createAnswererRole({ extract }),
questioner: createQuestionerRole(adapters?.questioner ?? llmAdapter, { extract }),
answerer: createAnswererRole(adapters?.answerer ?? llmAdapter, { extract }),
explorer: createExplorerRole(a("explorer"), { extract }),
},
moderator,

View File

@ -1,6 +1,6 @@
import type { Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { llmExtract, nerveCommandEnv, spawnSafe } from "@uncaged/nerve-workflow-utils";
import { createRole, nerveCommandEnv, spawnSafe } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveWorkdir } from "../lib/workdir.js";
@ -34,18 +34,12 @@ function lastQuestionerMeta(messages: WorkflowMessage[]): QuestionerMeta | undef
return undefined;
}
export function createAnswererRole(deps: CreateAnswererRoleDeps): Role<AnswererMeta> {
const { extract } = deps;
return async (ctx: ThreadContext) => {
export async function answererPrompt(ctx: ThreadContext): Promise<string> {
const messages = ctx.steps as unknown as WorkflowMessage[];
const cwd = resolveWorkdir(ctx.start);
const qm = lastQuestionerMeta(messages);
if (!qm || qm.questions.length === 0) {
return {
content: "answerer: no questions from questioner; skipping CLI lookup.",
meta: { results: [], has_unanswered: false },
};
throw new Error("answerer: prompt invoked without questioner questions — wrapped role should short-circuit");
}
const blocks: string[] = [];
@ -81,7 +75,7 @@ export function createAnswererRole(deps: CreateAnswererRoleDeps): Role<AnswererM
}
}
const bundle = [
return [
"You are the **answerer**. You MUST NOT read repository source code — only the CLI retrieval excerpts below.",
"For each question id, decide whether the knowledge base already answers it.",
"Set found=true only when the excerpt supports a confident answer; otherwise found=false.",
@ -89,17 +83,20 @@ export function createAnswererRole(deps: CreateAnswererRoleDeps): Role<AnswererM
"",
...blocks,
].join("\n");
}
const metaR = await llmExtract({
text: bundle,
schema: answererMetaSchema,
provider: extract.provider,
dryRun: false,
});
if (!metaR.ok) {
throw new Error(`answerer llmExtract: ${JSON.stringify(metaR.error)}`);
/**
 * Builds the answerer role: a thin wrapper around the generic `createRole`
 * four-tuple (adapter, prompt, schema, extract config) that short-circuits
 * before invoking the LLM when the questioner produced no questions.
 *
 * @param adapter - agent function used by the wrapped `createRole` pipeline.
 * @param deps - destructured for `extract`, the LLM extractor configuration.
 * @returns a `Role<AnswererMeta>` that either returns an empty-result meta
 *          (no questions) or delegates to the wrapped role.
 */
export function createAnswererRole(adapter: AgentFn, { extract }: CreateAnswererRoleDeps): Role<AnswererMeta> {
  const inner = createRole(adapter, answererPrompt, answererMetaSchema, extract);
  return async (ctx: ThreadContext) => {
    const messages = ctx.steps as unknown as WorkflowMessage[];
    const qm = lastQuestionerMeta(messages);
    // Short-circuit here so answererPrompt (which throws in this case)
    // is never reached and no LLM call is made.
    if (!qm || qm.questions.length === 0) {
      return {
        content: "answerer: no questions from questioner; skipping CLI lookup.",
        meta: { results: [], has_unanswered: false },
      };
    }
    // Fix: removed stale `return { content: bundle, meta: metaR.value };`
    // left over from the pre-refactor implementation — `bundle` and `metaR`
    // are not defined in this scope and the line made the block uncompilable.
    return inner(ctx);
  };
}

View File

@ -1,9 +1,9 @@
import { readFile } from "node:fs/promises";
import { join } from "node:path";
import type { Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { AgentFn, Role, ThreadContext, WorkflowMessage } from "@uncaged/nerve-core";
import type { LlmExtractorConfig } from "@uncaged/nerve-workflow-utils";
import { createLlmRole } from "@uncaged/nerve-workflow-utils";
import { createRole } from "@uncaged/nerve-workflow-utils";
import { z } from "zod";
import { resolveQueueForQuestioner } from "../lib/knowledge-queue.js";
@ -53,8 +53,29 @@ Remaining queue after this card (paths, may be empty): ${JSON.stringify(remainin
${cardBody}`;
}
export function createQuestionerRole(adapterExtract: CreateQuestionerRoleDeps): Role<QuestionerMeta> {
const { extract } = adapterExtract;
/**
 * Assembles the questioner's full prompt (system + user sections) from the
 * first card in the resolved knowledge queue.
 *
 * @param ctx - thread context; `ctx.steps` carries prior workflow messages
 *              and `ctx.start` locates the working directory and queue.
 * @returns the concatenated system/user prompt string.
 * @throws if the queue is empty (the wrapping role must short-circuit
 *         before the LLM is invoked) or the card file cannot be read.
 */
export async function questionerPrompt(ctx: ThreadContext): Promise<string> {
  const messages = ctx.steps as unknown as WorkflowMessage[];
  const cwd = resolveWorkdir(ctx.start);
  const queue = await resolveQueueForQuestioner(ctx.start, messages, cwd);

  // Peel off the current card; everything after it is the remaining queue.
  const [card, ...remaining_queue] = queue;
  if (card === undefined) {
    throw new Error(
      "questioner: prompt invoked with empty queue — wrapped role should short-circuit before LLM",
    );
  }

  let cardBody: string;
  try {
    cardBody = await readFile(join(cwd, card), "utf8");
  } catch (err) {
    const reason = err instanceof Error ? err.message : String(err);
    throw new Error(`questioner: failed to read ${card}: ${reason}`);
  }

  return `${questionerSystem()}\n\n${questionerUser(card, cardBody, remaining_queue)}`;
}
export function createQuestionerRole(adapter: AgentFn, { extract }: CreateQuestionerRoleDeps): Role<QuestionerMeta> {
const inner = createRole(adapter, questionerPrompt, questionerExtractSchema, extract);
return async (ctx: ThreadContext) => {
const messages = ctx.steps as unknown as WorkflowMessage[];
@ -74,26 +95,6 @@ export function createQuestionerRole(adapterExtract: CreateQuestionerRoleDeps):
const card = queue[0]!;
const remaining_queue = queue.slice(1);
let cardBody: string;
try {
cardBody = await readFile(join(cwd, card), "utf8");
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
throw new Error(`questioner: failed to read ${card}: ${msg}`);
}
const inner = createLlmRole({
provider: extract.provider,
prompt: async () => [
{ role: "system", content: questionerSystem() },
{ role: "user", content: questionerUser(card, cardBody, remaining_queue) },
],
extract: {
schema: questionerExtractSchema,
provider: extract.provider,
},
});
const r = await inner(ctx);
return {
content: r.content,