When you run a prompt in the AgentMark platform, the platform sends a prompt-run event to your webhook endpoint. The event contains the prompt AST and any custom properties.
Most users should use the webhook handler from their adapter's /runner export, which handles all of this automatically. This page documents the manual approach for advanced use cases.
The event payload looks like this:
{
  "event": {
    "type": "prompt-run",
    "data": {
      "ast": { ... },
      "customProps": { ... },
      "options": { "shouldStream": true }
    }
  }
}
Handling Different Generation Types
Your handler inspects the prompt's frontmatter to decide which generation type to run. The examples below use the Vercel AI SDK (v5). First, import the necessary functions:
import { NextRequest, NextResponse } from "next/server";
import { AgentMarkSDK } from "@agentmark-ai/sdk";
import {
  createAgentMarkClient,
  VercelAIModelRegistry,
} from "@agentmark-ai/ai-sdk-v5-adapter";
import { openai } from "@ai-sdk/openai";
import { getFrontMatter } from "@agentmark-ai/templatedx";
import {
  generateText,
  generateObject,
  experimental_generateImage as generateImage,
  experimental_generateSpeech as generateSpeech,
} from "ai";
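The snippets that follow assume a Next.js route handler in which the event body has already been parsed and an AgentMark client is available. Here is a minimal setup sketch; the model names are illustrative, and you should check your adapter version for the exact registry and client options:
const modelRegistry = new VercelAIModelRegistry();
// Register the models your prompts reference; these names are illustrative.
modelRegistry.registerModels(["gpt-4o", "gpt-4o-mini"], (name: string) =>
  openai(name)
);
const agentmark = createAgentMarkClient({ modelRegistry });

export async function POST(req: NextRequest) {
  const body = await req.json();
  const { data } = body.event;
  // The frontmatter determines which generation branch below applies.
  const frontmatter = getFrontMatter(data.ast) as Record<string, any>;
  // ...generation-type branches from the sections below go here...
}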
1. Text Generation
if (frontmatter.text_config) {
  const prompt = await agentmark.loadTextPrompt(data.ast);
  const vercelInput = await prompt.format({
    props: data.customProps ?? {},
  });
  const { text, finishReason, usage, steps } = await generateText(vercelInput);
  // Collect tool calls and results across all steps of the generation.
  const toolCalls = steps.flatMap((step) => step.toolCalls);
  const toolResults = steps.flatMap((step) => step.toolResults);
  return NextResponse.json({
    type: "text",
    result: text,
    usage,
    finishReason,
    toolCalls,
    toolResults,
  });
}
Response format:
{
  "type": "text",
  "result": "Generated text content",
  "usage": {
    "inputTokens": 0,
    "outputTokens": 0,
    "totalTokens": 0
  },
  "finishReason": "stop",
  "toolCalls": [
    {
      "type": "tool-call",
      "toolCallId": "string",
      "toolName": "string",
      "input": {}
    }
  ],
  "toolResults": [
    {
      "type": "tool-result",
      "toolCallId": "string",
      "toolName": "string",
      "input": {},
      "output": "string or object"
    }
  ]
}
2. Object Generation
if (frontmatter.object_config) {
  const prompt = await agentmark.loadObjectPrompt(data.ast);
  const vercelInput = await prompt.format({
    props: data.customProps ?? {},
  });
  const { object, finishReason, usage } = await generateObject(vercelInput);
  return NextResponse.json({
    type: "object",
    result: object,
    usage,
    finishReason,
  });
}
Response format:
{
  "type": "object",
  "result": {
    "key": "structured output"
  },
  "usage": {
    "inputTokens": 0,
    "outputTokens": 0,
    "totalTokens": 0
  },
  "finishReason": "stop"
}
3. Image Generation
if (frontmatter.image_config) {
  const prompt = await agentmark.loadImagePrompt(data.ast);
  const vercelInput = await prompt.format({
    props: data.customProps ?? {},
  });
  const { images } = await generateImage(vercelInput);
  return NextResponse.json({
    type: "image",
    result: images.map((image) => ({
      base64: image.base64,
      // AI SDK v5 renamed mimeType to mediaType on generated files.
      mimeType: image.mediaType,
    })),
  });
}
Response format:
{
  "type": "image",
  "result": [
    {
      "base64": "base64_encoded_image",
      "mimeType": "image/png"
    }
  ]
}
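For reference, the base64 payload decodes with standard Node APIs if you want to inspect the output locally. A small sketch, where the file name and the result variable are illustrative:
import { writeFile } from "node:fs/promises";

// `result` is the "result" array from the response above.
const [image] = result;
await writeFile("output.png", Buffer.from(image.base64, "base64"));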
4. Speech Generation
if (frontmatter.speech_config) {
  const prompt = await agentmark.loadSpeechPrompt(data.ast);
  const vercelInput = await prompt.format({
    props: data.customProps ?? {},
  });
  const { audio } = await generateSpeech(vercelInput);
  return NextResponse.json({
    type: "speech",
    result: {
      base64: audio.base64,
      format: audio.format,
      // AI SDK v5 renamed mimeType to mediaType on generated files.
      mimeType: audio.mediaType,
    },
  });
}
Response format:
{
  "type": "speech",
  "result": {
    "base64": "base64_encoded_audio",
    "format": "mp3",
    "mimeType": "audio/mpeg"
  }
}
Streaming Responses
AgentMark supports streaming responses for text and object generation. The event's options.shouldStream flag indicates whether the run expects a stream. To enable streaming, set the AgentMark-Streaming header to true in your response.
Text Generation Streaming
import { streamText } from "ai";
if (frontmatter.text_config) {
  const prompt = await agentmark.loadTextPrompt(data.ast);
  const vercelInput = await prompt.format({
    props: data.customProps ?? {},
  });
  const { fullStream } = streamText(vercelInput);
  const stream = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      for await (const chunk of fullStream) {
        if (chunk.type === "error") {
          const err = chunk.error as any;
          const message =
            err?.message ??
            err?.data?.error?.message ??
            "Something went wrong during inference";
          controller.enqueue(
            encoder.encode(
              JSON.stringify({ error: message, type: "error" }) + "\n"
            )
          );
          controller.close();
          return;
        }
        if (chunk.type === "text-delta") {
          // In AI SDK v5, text-delta chunks expose the delta as `text`.
          controller.enqueue(
            encoder.encode(
              JSON.stringify({ result: chunk.text, type: "text" }) + "\n"
            )
          );
        }
        if (chunk.type === "tool-call") {
          // v5 renamed `args` to `input` on tool-call chunks.
          controller.enqueue(
            encoder.encode(
              JSON.stringify({
                toolCall: {
                  args: chunk.input,
                  toolCallId: chunk.toolCallId,
                  toolName: chunk.toolName,
                },
                type: "text",
              }) + "\n"
            )
          );
        }
        if (chunk.type === "tool-result") {
          // v5 renamed `args`/`result` to `input`/`output` on tool-result chunks.
          controller.enqueue(
            encoder.encode(
              JSON.stringify({
                toolResult: {
                  args: chunk.input,
                  toolCallId: chunk.toolCallId,
                  toolName: chunk.toolName,
                  result: chunk.output,
                },
                type: "text",
              }) + "\n"
            )
          );
        }
        if (chunk.type === "finish") {
          // v5 reports aggregate usage on the finish chunk as `totalUsage`.
          controller.enqueue(
            encoder.encode(
              JSON.stringify({
                finishReason: chunk.finishReason,
                usage: chunk.totalUsage,
                type: "text",
              }) + "\n"
            )
          );
        }
      }
      controller.close();
    },
  });
  return new NextResponse(stream, {
    headers: {
      "Content-Type": "application/json",
      "Transfer-Encoding": "chunked",
      "AgentMark-Streaming": "true",
    },
  });
}
The stream sends newline-delimited JSON chunks:
{"type":"text","result":"First chunk"}
{"type":"text","result":"Second chunk"}
{"type":"text","toolCall":{"toolName":"get_weather","toolCallId":"call_123","args":{"city":"New York"}}}
{"type":"text","toolResult":{"toolName":"get_weather","toolCallId":"call_123","args":{"city":"New York"},"result":"Sunny, 22C"}}
{"type":"text","result":"Final response chunk"}
{"type":"text","finishReason":"stop","usage":{"promptTokens":10,"completionTokens":20,"totalTokens":30}}
Object Generation Streaming
import { streamObject } from "ai";
if (frontmatter.object_config) {
  const prompt = await agentmark.loadObjectPrompt(data.ast);
  const vercelInput = await prompt.format({
    props: data.customProps ?? {},
  });
  const { usage, fullStream } = streamObject(vercelInput);
  const stream = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      for await (const chunk of fullStream) {
        if (chunk.type === "error") {
          const err = chunk.error as any;
          const message =
            err?.message ??
            err?.data?.error?.message ??
            "Something went wrong during inference";
          controller.enqueue(
            encoder.encode(
              JSON.stringify({ error: message, type: "error" }) + "\n"
            )
          );
          controller.close();
          return;
        }
        if (chunk.type === "object") {
          // Each chunk carries the partial object accumulated so far.
          controller.enqueue(
            encoder.encode(
              JSON.stringify({ result: chunk.object, type: "object" }) + "\n"
            )
          );
        }
      }
      // `usage` is a promise that resolves once the stream has finished.
      const usageData = await usage;
      controller.enqueue(
        encoder.encode(
          JSON.stringify({ usage: usageData, type: "object" }) + "\n"
        )
      );
      controller.close();
    },
  });
  return new NextResponse(stream, {
    headers: {
      "Content-Type": "application/json",
      "Transfer-Encoding": "chunked",
      "AgentMark-Streaming": "true",
    },
  });
}
Stream format:
{"type":"object","result":{"partial":"object"}}
{"type":"object","result":{"more":"complete","partial":"object"}}
{"type":"object","result":{"final":"complete","object":"data"}}
{"type":"object","usage":{"promptTokens":10,"completionTokens":20,"totalTokens":30}}
Error Handling
Handle errors and return appropriate status codes:
try {
  // Process the prompt run
} catch (error) {
  console.error("Prompt run error:", error);
  return NextResponse.json(
    { message: "Error processing prompt run" },
    { status: 500 }
  );
}
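If the frontmatter matches none of the generation branches above, it is also worth returning an explicit client error rather than falling through silently; the message text here is illustrative:
return NextResponse.json(
  { message: "Unsupported prompt configuration" },
  { status: 400 }
);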
These examples use the Vercel AI SDK for generation. For other adapters (Claude Agent SDK, Mastra), use the webhook handler from the /runner export instead of a manual implementation.