Chat Completions

Chat completions are the primary API for interacting with AI models. Both the Rust and Python runtimes provide a unified interface that works across all 30+ supported providers.

// Rust
use ai_lib::AiClient;

let client = AiClient::from_model("openai/gpt-4o").await?;
let response = client.chat()
    .user("Hello, world!")
    .execute()
    .await?;
println!("{}", response.content);

# Python
from ai_lib_python import AiClient

client = await AiClient.create("openai/gpt-4o")
response = await client.chat() \
    .user("Hello, world!") \
    .execute()
print(response.content)

Use a system message to set the model's behavior:

// Rust
client.chat()
    .system("You are a helpful coding assistant. Always include code examples.")
    .user("Explain closures")
    .execute().await?;

# Python
await client.chat() \
    .system("You are a helpful coding assistant.") \
    .user("Explain closures") \
    .execute()

Pass the full conversation history as a list of messages:

// Rust
use ai_lib::Message;

let messages = vec![
    Message::system("You are a tutor."),
    Message::user("What is recursion?"),
    Message::assistant("Recursion is when a function calls itself..."),
    Message::user("Can you show an example?"),
];
client.chat().messages(messages).execute().await?;

# Python
from ai_lib_python import Message

messages = [
    Message.system("You are a tutor."),
    Message.user("What is recursion?"),
    Message.assistant("Recursion is when a function calls itself..."),
    Message.user("Can you show an example?"),
]
await client.chat().messages(messages).execute()
Generation can be tuned with standard sampling parameters:

| Parameter | Type | Description |
|-----------|------|-------------|
| temperature | float | Randomness (0.0 = deterministic, 2.0 = creative) |
| max_tokens | int | Maximum response length in tokens |
| top_p | float | Nucleus sampling (alternative to temperature) |
| stop | string[] | Sequences that stop generation |
// Rust
client.chat()
    .user("Write a poem")
    .temperature(0.9)
    .max_tokens(200)
    .top_p(0.95)
    .execute().await?;
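
The Python builder should work the same way; the following is a minimal sketch, assuming the Python chat builder mirrors the Rust setters (temperature, max_tokens, top_p):

# Python (sketch: assumes the Python builder mirrors the Rust setters)
await client.chat() \
    .user("Write a poem") \
    .temperature(0.9) \
    .max_tokens(200) \
    .top_p(0.95) \
    .execute()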

For real-time output, use streaming:

// Rust
use std::io::Write;     // brings flush() into scope
use futures::StreamExt; // brings next() into scope for the stream
use ai_lib::StreamingEvent;

let mut stream = client.chat()
    .user("Tell me a story")
    .stream()
    .execute_stream()
    .await?;
while let Some(event) = stream.next().await {
    // Print each content delta as it arrives
    if let StreamingEvent::ContentDelta { text, .. } = event? {
        print!("{text}");
        std::io::stdout().flush()?;
    }
}
# Python
async for event in client.chat() \
        .user("Tell me a story") \
        .stream():
    if event.is_content_delta:
        print(event.as_content_delta.text, end="", flush=True)

Track token usage and latency for cost management:

// Rust
let (response, stats) = client.chat()
    .user("Hello")
    .execute_with_stats()
    .await?;
println!("Prompt tokens: {}", stats.prompt_tokens);
println!("Completion tokens: {}", stats.completion_tokens);
println!("Latency: {}ms", stats.latency_ms);

# Python
response, stats = await client.chat() \
    .user("Hello") \
    .execute_with_stats()
print(f"Tokens: {stats.total_tokens}")
print(f"Latency: {stats.latency_ms}ms")

The same code works across all providers:

// Rust: just change the model identifier
let client = AiClient::from_model("anthropic/claude-3-5-sonnet").await?;
let client = AiClient::from_model("deepseek/deepseek-chat").await?;
let client = AiClient::from_model("gemini/gemini-2.0-flash").await?;

The protocol manifest handles endpoint URLs, authentication, parameter mapping, and streaming format differences automatically.