import { trace, component } from "@agentmark/sdk";
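
// The trace below is the workflow root ("orchestrator"); each component call
// becomes a node in the graph, linked to its parent(s) through graph.node.* metadata.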
await trace(
  {
    name: "ai-agent-workflow",
    metadata: {
      "graph.node.id": "orchestrator",
      "graph.node.display_name": "Orchestrator",
      "graph.node.type": "router"
    }
  },
  async () => {
    // Step 1: Process input
    await component(
      {
        name: "input-processor",
        metadata: {
          "graph.node.id": "input-processor",
          "graph.node.parent_id": "orchestrator",
          "graph.node.display_name": "Input Processor",
          "graph.node.type": "agent"
        }
      },
      async () => {
        // Process and validate input
      }
    );

    // Step 2: Retrieve context
    await component(
      {
        name: "context-retrieval",
        metadata: {
          "graph.node.id": "context-retrieval",
          "graph.node.parent_id": "orchestrator",
          "graph.node.display_name": "Context Retrieval",
          "graph.node.type": "retrieval"
        }
      },
      async () => {
        // Fetch relevant context
      }
    );

    // Step 3: LLM reasoning (depends on both previous steps)
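    // Unlike the single "graph.node.parent_id" used above, "graph.node.parent_ids"
    // takes a JSON-encoded array, letting this node fan in from multiple parents.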
    await component(
      {
        name: "llm-reasoning",
        metadata: {
          "graph.node.id": "llm-reasoning",
          "graph.node.parent_ids": JSON.stringify(["input-processor", "context-retrieval"]),
          "graph.node.display_name": "LLM Reasoning",
          "graph.node.type": "llm"
        }
      },
      async () => {
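        // `client` is assumed to be an AgentMark client initialized elsewhere in the app.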
        const prompt = await client.loadTextPrompt('reason.prompt.mdx');
        // ... reasoning logic
      }
    );
  }
);