//! omnillm 0.1.5
//!
//! Production-grade LLM API gateway with multi-key load balancing,
//! per-key rate limiting, circuit breaking, and cost tracking.
// Submodules for the OpenAI wire format, split by concern: error payloads,
// shared internal helpers, and request/response/stream translation.
mod error;
mod helpers;
mod request;
mod response;
mod stream;

// Re-exports are `pub(super)`: visible only to the parent module, so this
// module's internals are not part of the crate's public API. Note that
// `helpers` is deliberately not re-exported — presumably internal plumbing
// shared by the sibling submodules (can't confirm from this file alone).
pub(super) use error::parse_openai_error;
// Chat Completions and Responses API request translation. By naming
// convention, `parse_*` decodes OpenAI-shaped payloads into internal types
// and `emit_*` produces OpenAI-shaped output — inference from names only;
// TODO(review): confirm against `request.rs`.
pub(super) use request::{
    emit_openai_chat_request, emit_openai_responses_request, parse_openai_chat_request,
    parse_openai_responses_request,
};
// Response-side counterparts to the request translators above.
pub(super) use response::{
    emit_openai_chat_response, emit_openai_responses_response, parse_openai_chat_response,
    parse_openai_responses_response,
};
// Streaming event translation. NOTE(review): chat parsing is plural
// (`parse_openai_chat_stream_events`) while responses parsing is singular
// (`parse_openai_responses_stream_event`) — likely one chat chunk can yield
// multiple events; verify the asymmetry is intentional in `stream.rs`.
pub(super) use stream::{
    emit_openai_chat_stream_event, emit_openai_responses_stream_event,
    parse_openai_chat_stream_events, parse_openai_responses_stream_event,
};