When you run a prompt in the AgentMark platform, AgentMark sends a prompt-run event to your webhook endpoint. The event contains the prompt configuration and any test settings you’ve defined.

Event Format

{
  "event": {
    "type": "prompt-run",
    "data": {
      "prompt": {
        ...a prompt in AgentMark format
      }
    }
  }
}

Handling Different Generation Types

The webhook handler supports four types of generation using the Vercel AI SDK. First, import the functions you'll need, including the streaming variants used later:

import { NextRequest, NextResponse } from "next/server";
import { AgentMarkSDK } from "@agentmark/sdk";
import {
  createAgentMarkClient,
  VercelAIModelRegistry,
} from "@agentmark/vercel-ai-v4-adapter";
import { openai } from "@ai-sdk/openai";
import { getFrontMatter } from "@agentmark/templatedx";
import {
  generateText,
  generateObject,
  streamText,
  streamObject,
  experimental_generateImage as generateImage,
  experimental_generateSpeech as generateSpeech,
} from "ai";
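
The handlers below share some setup: parse the webhook body, read the prompt's frontmatter to decide which generation type to run, and create an agentmark client from a model registry. The following is a minimal sketch; the registered model names, the source of props, and the exact client options are assumptions you should adapt to your project:

// Register the models your prompts reference with the Vercel AI adapter.
// The model names below are placeholders; use the ones your prompts declare.
const modelRegistry = new VercelAIModelRegistry();
modelRegistry.registerModels(["gpt-4o", "gpt-4o-mini"], (name: string) =>
  openai(name)
);

const agentmark = createAgentMarkClient({ modelRegistry });

export async function POST(req: NextRequest) {
  const { event } = await req.json();
  const { data } = event; // data.prompt is the prompt in AgentMark format

  // The frontmatter's *_config key tells us which generation type to run.
  const frontmatter = getFrontMatter(data.prompt) as any;

  // Test props defined on the platform, if any (assumed to ride along on data).
  const props = data.props ?? {};

  // ...dispatch on frontmatter.text_config / object_config /
  // image_config / speech_config, as shown below.
}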

1. Text Generation

if (frontmatter.text_config) {
  const prompt = await agentmark.loadTextPrompt(data.prompt);
  const vercelInput = await prompt.format({ props });
  const output = await generateText(vercelInput);

  return NextResponse.json({
    type: "text",
    result: output.text,
    usage: output.usage,
    finishReason: output.finishReason,
    toolCalls: output.toolCalls,
    toolResults: output.toolResults,
  });
}

Response Format

{
  "type": "text",
  "result": "Generated text content",
  "usage": {
    "promptTokens": 0,
    "completionTokens": 0,
    "totalTokens": 0
  },
  "finishReason": "stop",
  "toolCalls": [
    {
      "toolName": "string",
      "args": {},
      "type": "string",
      "toolCallId": "string"
    }
  ],
  "toolResults": [
    {
      "toolCallId": "string",
      "result": "string or object",
      "type": "string",
      "toolName": "string",
      "args": {}
    }
  ]
}

2. Object Generation

if (frontmatter.object_config) {
  const prompt = await agentmark.loadObjectPrompt(data.prompt);
  const vercelInput = await prompt.format({ props });
  const output = await generateObject(vercelInput);

  return NextResponse.json({
    type: "object",
    result: output.object,
    usage: output.usage,
    finishReason: output.finishReason,
  });
}

Response Format

{
  "type": "object",
  "result": {
    // Structured object response
  },
  "usage": {
    "promptTokens": 0,
    "completionTokens": 0,
    "totalTokens": 0
  },
  "finishReason": "stop"
}

3. Image Generation

if (frontmatter.image_config) {
  const prompt = await agentmark.loadImagePrompt(data.prompt);
  const vercelInput = await prompt.format({ props });
  const { images } = await generateImage(vercelInput);
  
  return NextResponse.json({
    type: "image",
    result: images.map((i) => ({
      base64: i.base64,
      mimeType: i.mimeType,
    })),
  });
}

Response Format

{
  "type": "image",
  "result": [
    {
      "base64": "base64_encoded_image",
      "mimeType": "image/png"
    }
  ]
}

4. Speech Generation

if (frontmatter.speech_config) {
  const prompt = await agentmark.loadSpeechPrompt(data.prompt);
  const vercelInput = await prompt.format({ props });
  const { audio } = await generateSpeech(vercelInput);
  
  return NextResponse.json({
    type: "speech",
    result: {
      base64: audio.base64,
      format: audio.format,
      mimeType: audio.mimeType,
    },
  });
}

Response Format

{
  "type": "speech",
  "result": {
    "base64": "base64_encoded_audio",
    "format": "mp3",
    "mimeType": "audio/mpeg"
  }
}

Streaming Responses

AgentMark supports streaming text and object generation results back to the platform. To enable streaming, set the AgentMark-Streaming header to true in your response.

Text Generation Streaming

if (frontmatter.text_config) {
  const prompt = await agentmark.loadTextPrompt(data.prompt);
  const vercelInput = await prompt.format({ props });
  const { textStream, toolCalls, toolResults, finishReason, usage } = streamText(vercelInput);

  const stream = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      for await (const chunk of textStream) {
        const chunkData = encoder.encode(
          JSON.stringify({
            result: chunk,
            type: "text",
          }) + "\n"
        );
        controller.enqueue(chunkData);
      }
      const toolCallsData = await toolCalls;
      const toolResultsData = await toolResults;
      const finishReasonData = await finishReason;
      const usageData = await usage;
      const metadata = {
        usage: usageData,
        toolCalls: toolCallsData,
        toolResults: toolResultsData,
        finishReason: finishReasonData,
      };
      const metadataChunk = JSON.stringify(metadata);
      const metadataChunkData = encoder.encode(metadataChunk);
      controller.enqueue(metadataChunkData);
      controller.close();
    },
  });

  return new NextResponse(stream, {
    headers: {
      "Content-Type": "application/json",
      "Transfer-Encoding": "chunked",
      "AgentMark-Streaming": "true",
    },
  });
}

The stream sends newline-delimited JSON: each content chunk is a JSON object followed by a newline, and a final metadata chunk (sent without a trailing newline) closes the stream:

{"type":"text","result":"First chunk"}
{"type":"text","result":"Second chunk"}
{"type":"text","result":"Third chunk"}
{"usage":{"promptTokens":10,"completionTokens":20,"totalTokens":30},"toolCalls":[...],"toolResults":[...],"finishReason":"stop"}

Object Generation Streaming

if (frontmatter.object_config) {
  const prompt = await agentmark.loadObjectPrompt(data.prompt);
  const vercelInput = await prompt.format({ props });
  const { usage, partialObjectStream } = streamObject(vercelInput);
  
  const stream = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      for await (const chunk of partialObjectStream) {
        const chunkData = encoder.encode(
          JSON.stringify({
            result: chunk,
            type: "object",
          }) + "\n"
        );
        controller.enqueue(chunkData);
      }
      const usageData = await usage;
      const metadata = {
        usage: usageData,
      };
      const metadataChunk = JSON.stringify(metadata);
      const metadataChunkData = encoder.encode(metadataChunk);
      controller.enqueue(metadataChunkData);
      controller.close();
    },
  });

  return new NextResponse(stream, {
    headers: {
      "Content-Type": "application/json",
      "Transfer-Encoding": "chunked",
      "AgentMark-Streaming": "true",
    },
  });
}

As with text streaming, the stream sends newline-delimited JSON: each partial object is a JSON object followed by a newline, with a final metadata chunk closing the stream:

{"type":"object","result":{"partial":"object"}}
{"type":"object","result":{"partial":"object"}}
{"type":"object","result":{"partial":"object"}}
{"usage":{"promptTokens":10,"completionTokens":20,"totalTokens":30}}

Error Handling

Handle errors appropriately in your webhook:

try {
  // Process prompt run
} catch (error) {
  console.error("Prompt run error:", error);
  return NextResponse.json(
    { message: "Error processing prompt run" },
    { status: 500 }
  );
}
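
The four config branches are mutually exclusive. If none of them matches, returning a client error makes the problem visible in the platform instead of failing silently; one possible fallback:

// No text_config / object_config / image_config / speech_config matched.
return NextResponse.json(
  { message: "Unsupported prompt configuration" },
  { status: 400 }
);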

The webhook implementation uses the Vercel AI SDK for generation. Make sure to install the required dependencies:

npm install ai @ai-sdk/openai
