AgentMark allows you to collaborate on prompts while maintaining type safety in a production environment.

Example

Here’s a prompt that defines both an input schema for its props and an output schema for its result:

math/addition.prompt.mdx
---
name: math-addition
object_config:
  model_name: gpt-4o
  schema:
    type: "object"
    properties:
      sum:
        type: "number"
        description: "The sum of the two numbers"
      explanation:
        type: "string"
        description: "Step by step explanation"
    required: ["sum", "explanation"]
input_schema:
  type: "object"
  properties:
    num1:
      type: "number"
      description: "First number to add"
    num2:
      type: "number"
      description: "Second number to add"
  required: ["num1", "num2"]
---

<System>You are a helpful math assistant that performs addition.</System>
<User>What is the sum of {props.num1} and {props.num2}?</User>

Running generate-types will create:

agentmark.types.ts
// Auto-generated types from AgentMark
// Do not edit this file directly

interface Math$AdditionIn {
  /** First number to add */
  num1: number;
  /** Second number to add */
  num2: number;
}

interface Math$AdditionOut {
  /** The sum of the two numbers */
  sum: number;
  /** Step by step explanation */
  explanation: string;
}

interface Math$Addition {
  input: Math$AdditionIn;
  output: Math$AdditionOut;
  kind: "object";
}

export default interface AgentMarkTypes {
  "math/addition.prompt.mdx": Math$Addition;
}

Generating Types

AgentMark provides a CLI to generate types from your prompts:

Local Development
# Generate types from a locally running AgentMark server (here on port 9002)
npx @agentmark/cli generate-types --local 9002 > agentmark.types.ts

Using Generated Types

The AgentMark SDK is fully type-safe when using generated types:

import { AgentMarkSDK } from "@agentmark/sdk";
import {
  createAgentMarkClient,
  VercelAIModelRegistry,
} from "@agentmark/vercel-ai-v4-adapter";
import { openai } from "@ai-sdk/openai";
import { generateObject } from "ai";
import type AgentMarkTypes from "./agentmark.types";

// API key and app ID for your AgentMark app (here read from environment variables)
const sdk = new AgentMarkSDK({
  apiKey: process.env.AGENTMARK_API_KEY!,
  appId: process.env.AGENTMARK_APP_ID!,
});

// Initialize tracing for observability
const tracer = sdk.initTracing();

const modelRegistry = new VercelAIModelRegistry();

// Register the model name referenced in the prompt's frontmatter (gpt-4o above)
modelRegistry.registerModels(["gpt-4o"], (name: string) => {
  return openai(name);
});

const agentMark = createAgentMarkClient<AgentMarkTypes>({
  loader: sdk.getFileLoader(),
  modelRegistry,
});

const run = async () => {
  // Load prompt with type safety
  const prompt = await agentMark.loadObjectPrompt("math/addition.prompt.mdx");

  // Format the prompt into Vercel AI SDK input, with type-checked props and telemetry
  const vercelInput = await prompt.format({
    props: {
      num1: 5,
      num2: 3,
    },
    telemetry: {
      isEnabled: true,
      functionId: "example-function",
      metadata: { userId: "user-123" },
    },
  });

  const result = await generateObject(vercelInput);

  // Type-safe access to results
  const sum = result.object.sum;
  console.log(result.object.explanation); // Also type-safe
};

// Note: You only need to shut down the tracer for local or short-running tasks.
run().then(() => tracer.shutdown());
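
Because the prompt path, the input props, and the result object are all typed from agentmark.types.ts, mistakes like the following are caught by the TypeScript compiler instead of surfacing at runtime. A quick sketch for illustration, continuing the example above (each commented-out line would fail to compile):

// A typo in the prompt path is rejected, because the key must exist in AgentMarkTypes:
// await agentMark.loadObjectPrompt("math/subtraction.prompt.mdx");

// Passing the wrong prop type is rejected, because props must match Math$AdditionIn:
// await prompt.format({ props: { num1: "5", num2: 3 } });

// Reading a property that isn't part of the output schema is rejected:
// console.log(result.object.total);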

Best Practices

  1. Development:

    • Use the --local flag to generate types from your local AgentMark server
    • Regenerate types whenever your prompts change so the two stay in sync
  2. CI/CD (coming soon):

    • Run type checking against the AgentMark platform
    • Fail the build if the types generated from AgentMark differ from the ones committed on your branch

Further Reading