Responses API Ready
Native `/responses` + streaming support with automatic fallback to `/chat/completions`.
Provider abstraction for OpenAI/Anthropic/Gemini and more — streaming, tools, multimodal, and proxy-ready.
Install with `cargo add llm-connector`, or add it to your Cargo.toml:
[dependencies]
llm-connector = "1.1.12"
tokio = { version = "1", features = ["full"] }

use llm_connector::{LlmClient, types::{ChatRequest, Message}};
// Minimal end-to-end example: send one chat message and print the reply.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Construct an OpenAI-backed client from an API key and base URL.
    let client = LlmClient::openai("sk-...", "https://api.openai.com/v1")?;

    // Compose a single-message request targeting the gpt-4o model.
    let req = ChatRequest::new("gpt-4o").add_message(Message::user("Hello!"));

    // Await the completion and print the assistant's text content.
    let reply = client.chat(&req).await?;
    println!("{}", reply.content);

    Ok(())
}