//! Chat completion services for `openmodex`, the official Rust SDK for the
//! OpenModex API (v0.1.1). See the crate-level documentation for setup and
//! authentication examples.
use crate::client::OpenModex;
use crate::error::Error;
use crate::streaming::ChatCompletionStream;
use crate::types::{ChatCompletionRequest, ChatCompletionResponse};

/// Service for chat completion operations.
///
/// Borrows the client for lifetime `'a`, so it is cheap to construct on
/// demand. Obtained via [`OpenModex::chat`].
#[derive(Debug)]
pub struct ChatService<'a> {
    // Borrowed handle to the configured API client; all requests route through it.
    client: &'a OpenModex,
}

impl<'a> ChatService<'a> {
    pub(crate) fn new(client: &'a OpenModex) -> Self {
        Self { client }
    }

    /// Access chat completions.
    pub fn completions(&self) -> ChatCompletions<'_> {
        ChatCompletions {
            client: self.client,
        }
    }
}

/// Chat completion operations.
///
/// Borrows the client for lifetime `'a`. Obtained via
/// [`ChatService::completions`].
#[derive(Debug)]
pub struct ChatCompletions<'a> {
    // Borrowed handle to the configured API client used for every request.
    client: &'a OpenModex,
}

impl<'a> ChatCompletions<'a> {
    /// Fill in the client's default model when the request left `model` empty.
    ///
    /// Leaves the request untouched when a model is already set, or when the
    /// client has no configured default (the server will then reject or
    /// default the model itself — this layer does not validate it).
    fn apply_default_model(&self, req: &mut ChatCompletionRequest) {
        if req.model.is_empty() {
            if let Some(ref default) = self.client.default_model {
                req.model = default.clone();
            }
        }
    }

    /// Create a chat completion.
    ///
    /// Applies the client's default model if the request does not specify one,
    /// then issues the request through the client's model-fallback mechanism.
    ///
    /// # Errors
    ///
    /// Returns [`Error`] when the underlying HTTP request fails for the
    /// requested model and every configured fallback.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use openmodex::*;
    /// # #[tokio::main]
    /// # async fn main() -> Result<(), Error> {
    /// let client = OpenModex::new("omx_sk_...")?;
    /// let response = client.chat().completions().create(
    ///     ChatCompletionRequest::new("gpt-4o")
    ///         .message(ChatMessage::user("Hello!"))
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn create(
        &self,
        mut req: ChatCompletionRequest,
    ) -> Result<ChatCompletionResponse, Error> {
        self.apply_default_model(&mut req);

        // Force non-streaming so the server returns a single JSON body even if
        // the caller left `stream` set from a reused request.
        req.stream = Some(false);

        let model = req.model.clone();

        self.client
            .with_fallback(&model, |m| {
                // Each fallback attempt gets its own copy of the request with
                // the candidate model substituted in.
                let mut r = req.clone();
                r.model = m;
                let client = self.client;
                async move { client.post("/chat/completions", &r).await }
            })
            .await
    }

    /// Create a streaming chat completion.
    ///
    /// Applies the client's default model if the request does not specify one,
    /// enables streaming on the request, and issues it through the client's
    /// model-fallback mechanism. Returns a [`ChatCompletionStream`] that
    /// implements [`futures::Stream`].
    ///
    /// # Errors
    ///
    /// Returns [`Error`] when establishing the stream fails for the requested
    /// model and every configured fallback. Errors occurring mid-stream are
    /// yielded as items of the returned stream.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use openmodex::*;
    /// # use futures::StreamExt;
    /// # #[tokio::main]
    /// # async fn main() -> Result<(), Error> {
    /// let client = OpenModex::new("omx_sk_...")?;
    /// let mut stream = client.chat().completions().create_stream(
    ///     ChatCompletionRequest::new("gpt-4o")
    ///         .message(ChatMessage::user("Tell me a story"))
    /// ).await?;
    ///
    /// while let Some(chunk) = stream.next().await {
    ///     let chunk = chunk?;
    ///     if let Some(content) = chunk.choices.first().and_then(|c| c.delta.content.as_ref()) {
    ///         print!("{content}");
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn create_stream(
        &self,
        mut req: ChatCompletionRequest,
    ) -> Result<ChatCompletionStream, Error> {
        self.apply_default_model(&mut req);

        // Enable streaming; the server responds with an event stream instead
        // of a single JSON body.
        req.stream = Some(true);

        let model = req.model.clone();

        self.client
            .with_fallback(&model, |m| {
                // Each fallback attempt gets its own copy of the request with
                // the candidate model substituted in.
                let mut r = req.clone();
                r.model = m;
                let client = self.client;
                async move {
                    let resp = client.post_stream("/chat/completions", &r).await?;
                    Ok(ChatCompletionStream::new(resp))
                }
            })
            .await
    }
}