# Streaming
Streaming is enabled through provider options and runtime-specific stream APIs.
Callers usually request a raw streaming provider response, then pass it through `process` to get clean chunks.
## Basic streaming flow

**Python**

```python
agent = prompty.load("chat.prompty")
messages = prompty.prepare(agent, inputs={"q": "Hi"})
agent.model.options.additionalProperties = {"stream": True}
response = prompty.run(agent, messages, raw=True)
for chunk in prompty.process(agent, response):
    print(chunk, end="", flush=True)
```

**TypeScript**

```ts
agent.model.options.additionalProperties = { stream: true };
const response = await run(agent, messages, { raw: true });
for await (const chunk of processResponse(agent, response)) {
  process.stdout.write(chunk);
}
```

**C#**

```csharp
agent.Model.Options.AdditionalProperties["stream"] = true;
var response = await Pipeline.RunAsync(agent, messages, raw: true);
if (response is PromptyStream stream)
{
    await foreach (var chunk in stream)
    {
        Console.Write(chunk);
    }
}
```

**Rust**

```rust
use futures::StreamExt;
use serde_json::json;

let agent = prompty::load("chat.prompty")?;
let messages = prompty::prepare(&agent, Some(&json!({}))).await?;
let mut stream = prompty::run_stream(&agent, &messages).await?;
while let Some(chunk) = stream.next().await {
    print!("{chunk}");
}
```

## Stream result shapes
| Type | Description |
|---|---|
| `PromptyStream` | Sync stream wrapper with tracing support |
| `AsyncPromptyStream` | Async stream wrapper with tracing support |
| `StreamChunk` | Schema-level chunk union for text, thinking, tool, and error chunks |
For schema details, see `StreamChunk` and `StreamOptions`.