Custom Providers
When You Need a Custom Provider
Section titled “When You Need a Custom Provider”
The built-in providers cover OpenAI, Azure OpenAI, and Anthropic. Build your own when you need to hit a different API — a self-hosted model, a custom gateway, or a provider with a proprietary SDK.
Architecture Overview
Section titled “Architecture Overview”
The pipeline looks up two components by the model.provider value in the .prompty file:
| Component | Responsibility | Keyed by |
|---|---|---|
| Executor | Sends messages to the LLM and returns the raw response | model.provider |
| Processor | Extracts the final result from the raw response | model.provider |
prepare() → messages → Executor.execute() → raw → Processor.process() → result

Implement the Executor
Section titled “Implement the Executor”from __future__ import annotationsfrom typing import Anyfrom prompty.model import Promptyfrom prompty.core.types import Message
class MyExecutor: """Executor for the 'my-custom' provider."""
def execute(self, agent: Prompty, messages: list[Message]) -> Any: import httpx
conn = agent.model.connection payload = { "model": agent.model.id, "messages": [{"role": m.role, "content": m.text} for m in messages], } resp = httpx.post( f"{conn.endpoint}/v1/completions", json=payload, headers={"Authorization": f"Bearer {conn.apiKey}"}, ) resp.raise_for_status() return resp.json()
async def execute_async(self, agent: Prompty, messages: list[Message]) -> Any: import httpx
conn = agent.model.connection payload = { "model": agent.model.id, "messages": [{"role": m.role, "content": m.text} for m in messages], } async with httpx.AsyncClient() as client: resp = await client.post( f"{conn.endpoint}/v1/completions", json=payload, headers={"Authorization": f"Bearer {conn.apiKey}"}, ) resp.raise_for_status() return resp.json()import type { Executor } from "@prompty/core";import type { Prompty, Message } from "@prompty/core";
/**
 * Executor for the "my-custom" provider.
 *
 * Posts an OpenAI-style chat payload to `{endpoint}/v1/completions` and
 * returns the parsed JSON body as the raw response.
 */
export const myExecutor: Executor = {
  async execute(agent: Prompty, messages: Message[]): Promise<unknown> {
    const conn = agent.model.connection;
    const resp = await fetch(`${conn.endpoint}/v1/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${conn.apiKey}`,
      },
      body: JSON.stringify({
        model: agent.model.id,
        messages: messages.map((m) => ({ role: m.role, content: m.text })),
      }),
    });
    // Fail loudly on non-2xx responses, matching the Python
    // (raise_for_status) and C# (EnsureSuccessStatusCode) executors;
    // fetch does not reject on HTTP errors by itself.
    if (!resp.ok) {
      throw new Error(`my-custom provider returned HTTP ${resp.status}`);
    }
    return resp.json();
  },
  // This provider does not support tool calls, so there is nothing to format.
  formatToolMessages() {
    return [];
  },
};

using System.Net.Http.Json;
using Prompty.Core;
/// <summary>
/// Executor for the "my-custom" provider. Posts an OpenAI-style chat payload
/// to {endpoint}/v1/completions and returns the raw response body as a string.
/// </summary>
public class MyExecutor : IExecutor
{
    // A single shared HttpClient avoids socket exhaustion across requests.
    private static readonly HttpClient _http = new();

    public async Task<object> ExecuteAsync(Prompty agent, List<Message> messages)
    {
        var conn = agent.Model.Connection;
        var payload = new
        {
            model = agent.Model.Id,
            messages = messages.Select(m => new { role = m.Role, content = m.Text }),
        };

        // Attach the auth header per request instead of mutating the shared
        // client's DefaultRequestHeaders: that mutation is not thread-safe and
        // would leak the header into every other request using _http.
        using var request = new HttpRequestMessage(
            HttpMethod.Post, $"{conn.Endpoint}/v1/completions")
        {
            Content = JsonContent.Create(payload),
        };
        request.Headers.Authorization = new("Bearer", conn.ApiKey);

        var resp = await _http.SendAsync(request);
        resp.EnsureSuccessStatusCode();
        // Return the body as a string; MyProcessor parses it with JsonDocument.
        return (await resp.Content.ReadAsStringAsync())!;
    }

    // This provider does not support tool calls, so there is nothing to format.
    public List<Message> FormatToolMessages(
        object raw, List<ToolCall> tc, List<string> tr, string? text) => [];
}

Implement the Processor
Section titled “Implement the Processor”
from __future__ import annotations
from typing import Any
from prompty.model import Prompty


class MyProcessor:
    """Extracts text content from the custom provider response."""

    def process(self, agent: Prompty, response: Any) -> Any:
        # Response is OpenAI-style JSON:
        # {"choices": [{"message": {"content": ...}}]}
        return response["choices"][0]["message"]["content"]

    async def process_async(self, agent: Prompty, response: Any) -> Any:
        # Extraction is pure CPU work, so the async variant just delegates.
        return self.process(agent, response)

import type { Processor, Prompty } from "@prompty/core";
/**
 * Processor for the "my-custom" provider: extracts the assistant text from an
 * OpenAI-style response object.
 */
export const myProcessor: Processor = {
  async process(_agent: Prompty, response: unknown): Promise<unknown> {
    // Narrow to the minimal shape we actually read instead of `as any`,
    // so typos in the access path are still caught by the compiler.
    const r = response as { choices: { message: { content: unknown } }[] };
    return r.choices[0].message.content;
  },
};

using System.Text.Json;
using Prompty.Core;
/// <summary>
/// Processor for the "my-custom" provider: parses the raw JSON string produced
/// by MyExecutor and extracts choices[0].message.content.
/// </summary>
public class MyProcessor : IProcessor
{
    public Task<object> ProcessAsync(Prompty agent, object response)
    {
        // JsonDocument is IDisposable; 'using var' releases its pooled
        // buffers instead of leaking them (the original never disposed it).
        using var doc = JsonDocument.Parse((string)response);
        var content = doc.RootElement
            .GetProperty("choices")[0]
            .GetProperty("message")
            .GetProperty("content")
            .GetString();
        return Task.FromResult<object>(content!);
    }
}

Register Your Provider
Section titled “Register Your Provider”
Register via entry points in pyproject.toml:
[project.entry-points."prompty.executors"]
my-custom = "my_provider.executor:MyExecutor"

[project.entry-points."prompty.processors"]
my-custom = "my_provider.processor:MyProcessor"

Reinstall after changing entry points:
uv pip install -e .

Call the registry functions at startup:
import { registerExecutor, registerProcessor } from "@prompty/core";
import { myExecutor } from "./my-provider/executor.js";
import { myProcessor } from "./my-provider/processor.js";

registerExecutor("my-custom", myExecutor);
registerProcessor("my-custom", myProcessor);

Register in your application startup:
using Prompty.Core;
// For custom providers, use InvokerRegistry directly
InvokerRegistry.RegisterExecutor("my-custom", new MyExecutor());
InvokerRegistry.RegisterProcessor("my-custom", new MyProcessor());

Or create your own builder extension:
public static class MyProviderExtensions
{
    // Registers both halves of the "my-custom" provider in one fluent call.
    public static PromptyBuilder AddMyProvider(this PromptyBuilder builder)
    {
        InvokerRegistry.RegisterExecutor("my-custom", new MyExecutor());
        InvokerRegistry.RegisterProcessor("my-custom", new MyProcessor());
        return builder;
    }
}
// Usage:
new PromptyBuilder()
    .AddMyProvider();

Use It
Section titled “Use It”
1. Write the .prompty File
Section titled “1. Write the .prompty File”
Set provider: my-custom in the model block:
---
name: custom-llm-chat
model:
  id: my-model-name
  provider: my-custom
  apiType: chat
  connection:
    kind: key
    endpoint: ${env:MY_LLM_ENDPOINT}
    apiKey: ${env:MY_LLM_API_KEY}
  options:
    temperature: 0.7
inputs:
  - name: question
    kind: string
---
system:
You are a helpful assistant.
user:
{{question}}

2. Run It
Section titled “2. Run It”
from prompty import invoke
result = invoke("custom-llm.prompty", inputs={"question": "Hello!"})
print(result)

import { invoke } from "@prompty/core";
const result = await invoke("custom-llm.prompty", { question: "Hello!" });
console.log(result);

var agent = await PromptyLoader.LoadAsync("custom-llm.prompty");
var inputs = new Dictionary<string, object?> { ["question"] = "Hello!" };
var result = await Pipeline.InvokeAsync(agent, inputs);
Console.WriteLine(result);