//! # inference-runtime-openai
//!
//! OpenAI Chat Completions runtime + Azure OpenAI variant. Doc §10.3.
//!
//! Implements the [`atomr_infer_core::ModelRunner`] contract over
//! HTTP/2 (via `reqwest`). SSE chunks are parsed by
//! `inference_remote_core::sse` and lifted into provider-typed deltas
//! by [`wire`].
pub use ;
pub use OpenAiPricing;
pub use classify_openai_error;
pub use OpenAiRunner;