The AgentMark client is configured in agentmark.client.ts (or agentmark_client.py). It connects your prompts to AI models, tools, evaluations, and prompt loading — used by the CLI, the platform, and your application code.
Basic Configuration
The client file is generated by npm create agentmark@latest. Each adapter has its own client pattern:
import {
createAgentMarkClient ,
VercelAIModelRegistry ,
} from "@agentmark-ai/ai-sdk-v5-adapter" ;
import { ApiLoader } from "@agentmark-ai/loader-api" ;
import { openai } from "@ai-sdk/openai" ;
const loader =
process . env . NODE_ENV === "development"
? ApiLoader . local ({
baseUrl: process . env . AGENTMARK_BASE_URL || "http://localhost:9418" ,
})
: ApiLoader . cloud ({
apiKey: process . env . AGENTMARK_API_KEY ! ,
appId: process . env . AGENTMARK_APP_ID ! ,
});
const modelRegistry = new VercelAIModelRegistry ()
. registerModels ([ "gpt-4o" , "gpt-4o-mini" ], ( name ) => openai ( name ))
. registerModels ([ "dall-e-3" ], ( name ) => openai . image ( name ))
. registerModels ([ "tts-1-hd" ], ( name ) => openai . speech ( name ));
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
});
Install: npm install @agentmark-ai/ai-sdk-v5-adapter @agentmark-ai/loader-api @ai-sdk/openai
import {
createAgentMarkClient ,
ClaudeAgentModelRegistry ,
} from "@agentmark-ai/claude-agent-sdk-v0-adapter" ;
import { ApiLoader } from "@agentmark-ai/loader-api" ;
const loader =
process . env . NODE_ENV === "development"
? ApiLoader . local ({
baseUrl: process . env . AGENTMARK_BASE_URL || "http://localhost:9418" ,
})
: ApiLoader . cloud ({
apiKey: process . env . AGENTMARK_API_KEY ! ,
appId: process . env . AGENTMARK_APP_ID ! ,
});
const modelRegistry = ClaudeAgentModelRegistry . createDefault ();
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
adapterOptions: {
permissionMode: "bypassPermissions" ,
maxTurns: 20 ,
},
});
Install: npm install @agentmark-ai/claude-agent-sdk-v0-adapter @agentmark-ai/loader-api
The adapterOptions are unique to this adapter: permissionMode — 'default', 'acceptEdits', 'bypassPermissions', or 'plan'; maxTurns — maximum number of agent turns; maxBudgetUsd — spending limit per run; cwd — working directory for the agent; allowedTools — whitelist of tool names; disallowedTools — blacklist of tool names.
import {
createAgentMarkClient ,
MastraModelRegistry ,
} from "@agentmark-ai/mastra-v0-adapter" ;
import { ApiLoader } from "@agentmark-ai/loader-api" ;
import { openai } from "@ai-sdk/openai" ;
const loader =
process . env . NODE_ENV === "development"
? ApiLoader . local ({
baseUrl: process . env . AGENTMARK_BASE_URL || "http://localhost:9418" ,
})
: ApiLoader . cloud ({
apiKey: process . env . AGENTMARK_API_KEY ! ,
appId: process . env . AGENTMARK_APP_ID ! ,
});
const modelRegistry = new MastraModelRegistry ()
. registerModels ([ "gpt-4o" , "gpt-4o-mini" ], ( name ) => openai ( name ));
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
});
Install: npm install @agentmark-ai/mastra-v0-adapter @agentmark-ai/loader-api @ai-sdk/openai
import os
from pathlib import Path
from dotenv import load_dotenv
from agentmark.prompt_core import FileLoader
from agentmark.loader_api import ApiLoader
from agentmark_claude_agent_sdk_v0 import (
create_claude_agent_client,
ClaudeAgentModelRegistry,
ClaudeAgentAdapterOptions,
)
load_dotenv()
if os.getenv( "AGENTMARK_ENV" ) == "development" :
loader = ApiLoader.local(
base_url = os.getenv( "AGENTMARK_BASE_URL" , "http://localhost:9418" )
)
else :
loader = ApiLoader.cloud(
api_key = os.environ[ "AGENTMARK_API_KEY" ],
app_id = os.environ[ "AGENTMARK_APP_ID" ],
)
model_registry = ClaudeAgentModelRegistry.create_default()
client = create_claude_agent_client(
model_registry = model_registry,
loader = loader,
adapter_options = ClaudeAgentAdapterOptions(
permission_mode = "bypassPermissions" ,
),
)
Install: pip install agentmark-claude-agent-sdk-v0 agentmark-prompt-core agentmark-loader-api
See Claude Agent SDK for the full adapter options reference.
import os
from agentmark_pydantic_ai_v0 import (
create_pydantic_ai_client,
create_default_model_registry,
)
from agentmark.loader_api import ApiLoader
if os.getenv( "AGENTMARK_ENV" ) == "development" :
loader = ApiLoader.local(
base_url = os.getenv( "AGENTMARK_BASE_URL" , "http://localhost:9418" )
)
else :
loader = ApiLoader.cloud(
api_key = os.environ[ "AGENTMARK_API_KEY" ],
app_id = os.environ[ "AGENTMARK_APP_ID" ],
)
model_registry = create_default_model_registry()
client = create_pydantic_ai_client(
model_registry = model_registry,
loader = loader,
)
Install: pip install agentmark-pydantic-ai-v0 agentmark-loader-api
create_default_model_registry() auto-resolves model names to providers: gpt-* to OpenAI, claude-* to Anthropic, gemini-* to Google, etc.
Prompt Loading
The loader determines how prompts are fetched at runtime. AgentMark provides two loaders:
ApiLoader (Recommended)
FileLoader (Self-Hosted)
Use ApiLoader for both development and production: import { ApiLoader } from "@agentmark-ai/loader-api" ;
// Development — loads from local dev server
const loader = ApiLoader . local ({
baseUrl: "http://localhost:9418" ,
});
// Production — loads from AgentMark Cloud CDN
const loader = ApiLoader . cloud ({
apiKey: process . env . AGENTMARK_API_KEY ! ,
appId: process . env . AGENTMARK_APP_ID ! ,
});
ApiLoader.cloud() fetches prompts from the AgentMark API with a 60-second TTL cache. ApiLoader.local() fetches from your running agentmark dev server. Use FileLoader to load pre-built prompts from disk (no cloud dependency): import { FileLoader } from "@agentmark-ai/loader-file" ;
const loader = new FileLoader ( "./dist/agentmark" );
Requires running agentmark build --out dist/agentmark before deployment to compile your .prompt.mdx files into JSON. A common pattern is to use ApiLoader.local() in development and FileLoader in production: import { ApiLoader } from "@agentmark-ai/loader-api" ;
import { FileLoader } from "@agentmark-ai/loader-file" ;
const loader =
process . env . NODE_ENV === "development"
? ApiLoader . local ({ baseUrl: "http://localhost:9418" })
: new FileLoader ( "./dist/agentmark" );
Registering Models
The model registry maps model names (from prompt frontmatter) to actual AI SDK model instances. Each adapter has its own registry class.
import { VercelAIModelRegistry } from "@agentmark-ai/ai-sdk-v5-adapter" ;
import { openai } from "@ai-sdk/openai" ;
import { anthropic } from "@ai-sdk/anthropic" ;
import { google } from "@ai-sdk/google" ;
const modelRegistry = new VercelAIModelRegistry ()
// Language models
. registerModels ([ "gpt-4o" , "gpt-4o-mini" ], ( name ) => openai ( name ))
. registerModels ([ "claude-sonnet-4-20250514" ], ( name ) => anthropic ( name ))
. registerModels ([ "gemini-2.0-flash" ], ( name ) => google ( name ))
// Image models
. registerModels ([ "dall-e-3" ], ( name ) => openai . image ( name ))
// Speech models
. registerModels ([ "tts-1-hd" ], ( name ) => openai . speech ( name ));
You can also use regex patterns for dynamic matching: const modelRegistry = new VercelAIModelRegistry ()
. registerModels ( / ^ gpt-/ , ( name ) => openai ( name ))
. registerModels ( / ^ claude-/ , ( name ) => anthropic ( name ));
import { ClaudeAgentModelRegistry } from "@agentmark-ai/claude-agent-sdk-v0-adapter" ;
// Option 1: Default registry (passes model names through)
const modelRegistry = ClaudeAgentModelRegistry . createDefault ();
// Option 2: Custom configuration per model
const modelRegistry = new ClaudeAgentModelRegistry ()
. registerModels ([ "claude-sonnet-4-20250514" ], ( name ) => ({
model: name ,
maxThinkingTokens: 10000 ,
}));
import { MastraModelRegistry } from "@agentmark-ai/mastra-v0-adapter" ;
import { openai } from "@ai-sdk/openai" ;
const modelRegistry = new MastraModelRegistry ()
. registerModels ([ "gpt-4o" , "gpt-4o-mini" ], ( name ) => openai ( name ));
from agentmark_claude_agent_sdk_v0 import (
ClaudeAgentModelRegistry,
ModelConfig,
)
# Option 1: Default registry (passes model names through)
model_registry = ClaudeAgentModelRegistry.create_default()
# Option 2: Custom configuration per model
model_registry = ClaudeAgentModelRegistry()
model_registry.register_models(
[ "claude-sonnet-4-20250514" ],
lambda name , _ : ModelConfig( model = name)
)
model_registry.register_models(
[ "claude-opus-4-20250514" ],
lambda name , _ : ModelConfig( model = name, max_thinking_tokens = 10000 )
)
from agentmark_pydantic_ai_v0 import (
PydanticAIModelRegistry,
create_default_model_registry,
)
# Option 1: Auto-resolves model names to providers
model_registry = create_default_model_registry()
# Option 2: Custom registry
model_registry = PydanticAIModelRegistry()
model_registry.register_models(
[ "gpt-4o" , "gpt-4o-mini" ],
lambda name , opts = None : f "openai: { name } "
)
Models referenced in prompt frontmatter must be registered in the model registry:
---
text_config :
model_name : gpt-4o
---
Use agentmark pull-models to add built-in models to your agentmark.json. You still need to register them in the client for runtime use.
Registering Tools
Tools allow prompts to call functions during generation. Pass tools directly as a plain object to createAgentMarkClient and reference them by name in prompt frontmatter.
Use the native tool() function from the ai package to define tools: import { createAgentMarkClient , VercelAIModelRegistry } from "@agentmark-ai/ai-sdk-v5-adapter" ;
import { tool } from "ai" ;
import { z } from "zod" ;
const searchTool = tool ({
description: "Search the knowledge base" ,
parameters: z . object ({ query: z . string () }),
execute : async ({ query }) => ({ results: [ `Result for ${ query } ` ] }),
});
const weatherTool = tool ({
description: "Get current weather for a location" ,
parameters: z . object ({ location: z . string () }),
execute : async ({ location }) => ({ temp: 72 , condition: "sunny" }),
});
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
tools: {
search_knowledgebase: searchTool ,
get_weather: weatherTool ,
},
});
The Claude Agent SDK adapter uses mcp_servers instead of tools, since the Claude agent accesses tools through MCP: import { createAgentMarkClient , ClaudeAgentModelRegistry } from "@agentmark-ai/claude-agent-sdk-v0-adapter" ;
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
mcp_servers: {
tools: { url: "https://tools.example.com/mcp" },
},
});
The Python Claude Agent SDK adapter also uses mcp_servers: from agentmark_claude_agent_sdk_v0 import create_claude_agent_client
client = create_claude_agent_client(
model_registry = model_registry,
loader = loader,
mcp_servers = {
"tools" : { "url" : "https://tools.example.com/mcp" },
},
)
Pass tools as a plain object to createAgentMarkClient: import { createAgentMarkClient , MastraModelRegistry } from "@agentmark-ai/mastra-v0-adapter" ;
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
tools: {
search_knowledgebase: searchTool ,
},
});
Pass native Python functions as a tools dict: from agentmark_pydantic_ai_v0 import create_pydantic_ai_client
async def search_knowledgebase ( query : str ) -> dict :
return { "results" : [ f "Result for { query } " ]}
client = create_pydantic_ai_client(
model_registry = model_registry,
tools = { "search_knowledgebase" : search_knowledgebase},
loader = loader,
)
Reference tools in prompt frontmatter:
---
text_config :
model_name : gpt-4o
tools :
- search_knowledgebase
---
Learn more about tools
Registering Evals
Eval functions score prompt outputs during experiments. Score schemas are defined separately in agentmark.json (see Project Config ) and deployed to the platform. Eval functions are registered in your client config and connected to scores by name.
import type { EvalFunction } from "@agentmark-ai/prompt-core" ;
const evals : Record < string , EvalFunction > = {
exact_match : ({ output , expectedOutput }) => {
const match = output === expectedOutput ;
return { score: match ? 1 : 0 , passed: match };
},
contains_keyword : ({ output , expectedOutput }) => {
const contains = String ( output ). includes ( String ( expectedOutput ));
return { passed: contains };
},
};
Pass the evals to your client: export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
evals ,
});
from agentmark.prompt_core import EvalParams, EvalResult
evals = {
"exact_match" : lambda params : {
"passed" : params[ "output" ] == params.get( "expectedOutput" ),
},
"contains_keyword" : lambda params : {
"passed" : str (params.get( "expectedOutput" , "" )) in str (params[ "output" ]),
},
}
client = create_pydantic_ai_client(
model_registry = model_registry,
loader = loader,
evals = evals,
)
from agentmark.prompt_core import EvalParams, EvalResult
from agentmark_claude_agent_sdk_v0 import (
create_claude_agent_client,
ClaudeAgentModelRegistry,
ClaudeAgentAdapterOptions,
)
def exact_match ( params : EvalParams) -> EvalResult:
match = str (params[ "output" ]).strip() == str (params.get( "expectedOutput" , "" )).strip()
return { "passed" : match, "score" : 1.0 if match else 0.0 }
evals = {
"exact_match" : exact_match,
}
client = create_claude_agent_client(
model_registry = ClaudeAgentModelRegistry.create_default(),
loader = loader,
evals = evals,
)
Reference evals in prompt frontmatter:
---
test_settings :
dataset : ./datasets/sentiment.jsonl
evals :
- exact_match
---
The evalRegistry and scores options are deprecated aliases for evals and still work for backward compatibility.
Learn more about evaluations
MCP Servers
MCP servers provide additional tools to your prompts. Pass them as a plain mcpServers object to createAgentMarkClient:
import { createAgentMarkClient , VercelAIModelRegistry } from "@agentmark-ai/ai-sdk-v5-adapter" ;
export const client = createAgentMarkClient ({
loader ,
modelRegistry ,
mcpServers: {
filesystem: {
command: "npx" ,
args: [ "-y" , "@modelcontextprotocol/server-filesystem" , "./docs" ],
},
github: {
command: "npx" ,
args: [ "-y" , "@modelcontextprotocol/server-github" ],
env: { GITHUB_PERSONAL_ACCESS_TOKEN: process . env . GITHUB_TOKEN ! },
},
docs: {
url: "https://docs.example.com/mcp" ,
headers: { Authorization: "Bearer env(MCP_TOKEN)" },
},
},
});
Each key in the mcpServers object is the server name. Local servers use command and args, while remote servers use url and optional headers.
MCP servers configured in agentmark.json are available in the platform editor. MCP servers configured in the client code are available at runtime.
Learn more about MCP
Observability
The AgentMark SDK provides OpenTelemetry-based tracing for monitoring prompts in production.
import { AgentMarkSDK } from "@agentmark-ai/sdk" ;
const sdk = new AgentMarkSDK ({
apiKey: process . env . AGENTMARK_API_KEY ! ,
appId: process . env . AGENTMARK_APP_ID ! ,
});
// Initialize tracing (call once at startup)
sdk . initTracing ();
// Use the SDK's built-in loader
const loader = sdk . getApiLoader ();
initTracing() sets up an OpenTelemetry BatchSpanProcessor that exports traces to the AgentMark API. For debugging, use sdk.initTracing({ disableBatch: true }) for immediate span export. To redact sensitive data from traces, pass a mask function. See PII masking.
from agentmark_sdk import AgentMarkSDK
sdk = AgentMarkSDK(
api_key = os.environ[ "AGENTMARK_API_KEY" ],
app_id = os.environ[ "AGENTMARK_APP_ID" ],
)
sdk.init_tracing()
To redact sensitive data from traces, pass a mask function. See PII masking .
You can also pass a mask function to redact sensitive data from traces before they leave your application:
import { AgentMarkSDK , createPiiMasker } from '@agentmark-ai/sdk' ;
const sdk = new AgentMarkSDK ({
apiKey: process . env . AGENTMARK_API_KEY ! ,
appId: process . env . AGENTMARK_APP_ID ! ,
mask: createPiiMasker ({ email: true , phone: true , ssn: true }),
});
sdk . initTracing ();
Learn more about PII masking
Learn more about observability
Type Safety
Run agentmark build to generate agentmark.types.ts with TypeScript types for all your prompts. Pass the type to createAgentMarkClient for autocomplete on prompt names, props, and outputs:
import type { AgentMarkTypes } from "./agentmark.types" ;
export const client = createAgentMarkClient < AgentMarkTypes >({
loader ,
modelRegistry ,
});
// Type-checked: prompt name, props, and output
const prompt = await client . loadTextPrompt ( "greeting.prompt.mdx" );
const input = await prompt . format ({
props: { name: "Alice" , role: "developer" }, // type-checked
});
Learn more about type safety
Using the Client
Import the client in your application to load and run prompts:
import { client } from "./agentmark.client" ;
import { generateText } from "ai" ;
const prompt = await client . loadTextPrompt ( "greeting.prompt.mdx" );
const input = await prompt . format ({
props: { name: "Alice" },
telemetry: { isEnabled: true },
});
const result = await generateText ( input );
console . log ( result . text );
import { client } from "./agentmark.client" ;
import { withTracing } from "@agentmark-ai/claude-agent-sdk-v0-adapter" ;
import Anthropic from "@anthropic-ai/claude-code" ;
const anthropic = new Anthropic ();
const prompt = await client . loadTextPrompt ( "agent-task.prompt.mdx" );
const adapted = await prompt . format ({
props: { task: "Refactor the auth module" },
telemetry: { isEnabled: true },
});
const result = await withTracing (
( opts ) => anthropic . messages . stream ( opts ),
adapted
);
for await ( const message of result ) {
console . log ( message );
}
import { client } from "./agentmark.client" ;
const prompt = await client . loadTextPrompt ( "greeting.prompt.mdx" );
const input = await prompt . format ({
props: { name: "Alice" },
telemetry: { isEnabled: true },
});
const result = await agent . generate ( input );
from agentmark_claude_agent_sdk_v0 import run_text_prompt
from agentmark_client import client
prompt = await client.load_text_prompt( "code-reviewer.prompt.mdx" )
params = await prompt.format( props = {
"task" : "Analyze the auth module and suggest improvements"
})
result = await run_text_prompt(params)
print (result.output)
from agentmark_pydantic_ai_v0 import run_text_prompt
from agentmark_client import client
prompt = await client.load_text_prompt( "greeting.prompt.mdx" )
params = await prompt.format( props = { "name" : "Alice" })
result = await run_text_prompt(params)
print (result.output)
Troubleshooting
Model not found — ensure the model name in prompt frontmatter is registered in your model registry. Tool not available — check the tool is included in the tools object passed to createAgentMarkClient and that the name matches the prompt config. Loader connection failed — verify agentmark dev is running for local mode, or check AGENTMARK_API_KEY / AGENTMARK_APP_ID for cloud mode. MCP server not connecting — verify the command/args are correct and any required env vars are set. Type errors — run agentmark build to regenerate agentmark.types.ts.
Next Steps
Running Prompts Use the client to run prompts
Tools & Agents Register and use tools
MCP Integration Connect MCP servers
Type Safety Add TypeScript types
Have Questions? We’re here to help! Choose the best way to reach us: