
AiClient API (Rust)

A client is constructed either directly from a model identifier or through the builder:

```rust
use std::time::Duration;
use ai_lib::AiClient;

// Automatic protocol loading
let client = AiClient::from_model("anthropic/claude-3-5-sonnet").await?;

// Explicit configuration through the builder
let client = AiClient::builder()
    .model("openai/gpt-4o")
    .protocol_dir("./ai-protocol")
    .timeout(Duration::from_secs(60))
    .build()
    .await?;
```

The chat request builder provides a fluent API:

```rust
let response = client.chat()
    // Messages
    .system("You are a helpful assistant")
    .user("Hello!")
    .messages(vec![Message::user("Follow-up")])
    // Parameters
    .temperature(0.7)
    .max_tokens(1000)
    .top_p(0.9)
    .stop(vec!["END".into()])
    // Tools
    .tools(vec![weather_tool])
    .tool_choice("auto")
    // Execution
    .execute()
    .await?;
```

`execute()` resolves to a `ChatResponse`:

```rust
pub struct ChatResponse {
    pub content: String,           // Response text
    pub tool_calls: Vec<ToolCall>, // Function calls (if any)
    pub finish_reason: String,     // Why the response ended
    pub usage: Usage,              // Token usage
}
```
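
A minimal sketch of inspecting a response, using the `response` from the builder example above and only the fields shown in the struct:

```rust
// `response` comes from the `execute()` call above.
println!("model said: {}", response.content);
println!("finished because: {}", response.finish_reason);

// Tool calls are empty unless the model decided to call a tool.
if !response.tool_calls.is_empty() {
    println!("model requested {} tool call(s)", response.tool_calls.len());
}
```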

When streaming, the client emits `StreamingEvent` values:

```rust
pub enum StreamingEvent {
    ContentDelta { text: String, index: usize },
    ThinkingDelta { text: String },
    ToolCallStarted { id: String, name: String, index: usize },
    PartialToolCall { id: String, arguments: String, index: usize },
    ToolCallEnded { id: String, index: usize },
    StreamEnd { finish_reason: Option<String>, usage: Option<Usage> },
    Metadata { model: Option<String>, usage: Option<Usage> },
}
```

Per-call statistics are reported as `CallStats`:

```rust
pub struct CallStats {
    pub total_tokens: u32,
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub latency_ms: u64,
    pub model: String,
    pub provider: String,
}
```

```rust
// Simple response
let response = client.chat().user("Hello").execute().await?;

// Response with statistics
let (response, stats) = client.chat().user("Hello").execute_with_stats().await?;
```
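
As a sketch, the returned statistics can be logged directly; every field used here comes from the `CallStats` struct above:

```rust
// Log the per-call statistics returned by `execute_with_stats()`.
println!(
    "{}/{}: {} tokens ({} prompt + {} completion) in {} ms",
    stats.provider,
    stats.model,
    stats.total_tokens,
    stats.prompt_tokens,
    stats.completion_tokens,
    stats.latency_ms,
);
```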

To stream, call `.stream()` before executing:

```rust
// `.next()` on the stream needs a `StreamExt` import
// (e.g. `futures::StreamExt` or `tokio_stream::StreamExt`).
use futures::StreamExt;

let mut stream = client.chat()
    .user("Hello")
    .stream()
    .execute_stream()
    .await?;

while let Some(event) = stream.next().await {
    // Handle each StreamingEvent
}
```
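
Inside the loop body, a handler typically matches on the variants defined above. A sketch of such a handler follows; the variant and field names come from the `StreamingEvent` enum, while the crate-root export path and the printing itself are illustrative assumptions:

```rust
use std::io::Write;
use ai_lib::StreamingEvent; // assumed export path

// Illustrative handler for the loop body above.
fn handle_event(event: StreamingEvent) {
    match event {
        StreamingEvent::ContentDelta { text, .. } => {
            print!("{text}");
            let _ = std::io::stdout().flush();
        }
        StreamingEvent::ThinkingDelta { text } => eprintln!("[thinking] {text}"),
        StreamingEvent::ToolCallStarted { name, .. } => eprintln!("[tool start] {name}"),
        StreamingEvent::PartialToolCall { arguments, .. } => eprintln!("[tool args] {arguments}"),
        StreamingEvent::ToolCallEnded { id, .. } => eprintln!("[tool done] {id}"),
        StreamingEvent::StreamEnd { finish_reason, .. } => eprintln!("\n[done] {finish_reason:?}"),
        StreamingEvent::Metadata { model, .. } => eprintln!("[model] {model:?}"),
    }
}
```

In the loop above, the placeholder comment becomes `handle_event(event);`.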

Streams can also be cancelled mid-flight:

```rust
let (mut stream, cancel_handle) = client.chat()
    .user("Long task...")
    .stream()
    .execute_stream_cancellable()
    .await?;

// Cancel from another task
tokio::spawn(async move {
    tokio::time::sleep(Duration::from_secs(5)).await;
    cancel_handle.cancel();
});
```
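
The stream itself is drained as before, concurrently with the cancel task. A sketch, assuming cancellation simply ends the stream (same `StreamExt` import as above; the byte counting is illustrative):

```rust
// Drain events until the stream ends, either naturally or via cancellation.
let mut bytes_received = 0;
while let Some(event) = stream.next().await {
    if let StreamingEvent::ContentDelta { text, .. } = event {
        bytes_received += text.len();
    }
}
println!("received {bytes_received} bytes before the stream closed");
```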

Errors are matched on the `Error` enum:

```rust
use ai_lib::{Error, ErrorContext};

match client.chat().user("Hello").execute().await {
    Ok(response) => println!("{}", response.content),
    Err(Error::Protocol(e)) => eprintln!("Protocol error: {e}"),
    Err(Error::Transport(e)) => eprintln!("HTTP error: {e}"),
    Err(Error::Remote(e)) => {
        eprintln!("Provider error: {}", e.error_type);
        // e.error_type is one of the 13 standard error classes
    }
    Err(e) => eprintln!("Other error: {e}"),
}
```
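
Transport failures are often transient; a hedged sketch of retrying only those with a fixed backoff follows. The retry policy is purely illustrative, and only the `Error::Transport` variant comes from the API above:

```rust
use std::time::Duration;
use ai_lib::Error;

// Illustrative retry loop: retry HTTP-level failures, up to 3 attempts.
let mut attempts = 0;
let response = loop {
    match client.chat().user("Hello").execute().await {
        Ok(resp) => break resp,
        Err(Error::Transport(e)) if attempts < 3 => {
            attempts += 1;
            eprintln!("transport error ({e}), retry attempt {attempts}");
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
        Err(e) => return Err(e.into()),
    }
};
println!("{}", response.content);
```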