// openai-client-base 0.12.0
//
// Auto-generated Rust client for the OpenAI API
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * The version of the OpenAPI document: 2.3.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

/// EvalRunOutputItemSample : A sample containing the input and output of the evaluation run.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, bon::Builder)]
pub struct EvalRunOutputItemSample {
    /// An array of input messages.
    pub input: Vec<models::EvalRunOutputItemSampleInputInner>,
    /// An array of output messages.
    pub output: Vec<models::EvalRunOutputItemSampleOutputInner>,
    /// The reason why the sample generation was finished.
    pub finish_reason: String,
    /// The model used for generating the sample.
    pub model: String,
    /// Token usage statistics for the sample (boxed to keep the struct small).
    pub usage: Box<models::EvalRunOutputItemSampleUsage>,
    /// Error information reported by the API for this sample (boxed to keep the struct small).
    pub error: Box<models::EvalApiError>,
    /// The sampling temperature used.
    pub temperature: f64,
    /// The maximum number of tokens allowed for completion.
    pub max_completion_tokens: i32,
    /// The top_p value used for sampling.
    pub top_p: f64,
    /// The seed used for generating the sample.
    pub seed: i32,
}

impl EvalRunOutputItemSample {
    /// Builds a sample containing the input and output of the evaluation run.
    ///
    /// Takes ownership of all components; `usage` and `error` are boxed
    /// internally to match the struct's field types.
    pub fn new(
        input: Vec<models::EvalRunOutputItemSampleInputInner>,
        output: Vec<models::EvalRunOutputItemSampleOutputInner>,
        finish_reason: String,
        model: String,
        usage: models::EvalRunOutputItemSampleUsage,
        error: models::EvalApiError,
        temperature: f64,
        max_completion_tokens: i32,
        top_p: f64,
        seed: i32,
    ) -> EvalRunOutputItemSample {
        Self {
            input,
            output,
            finish_reason,
            model,
            // From<T> for Box<T> boxes the owned values in place.
            usage: usage.into(),
            error: error.into(),
            temperature,
            max_completion_tokens,
            top_p,
            seed,
        }
    }
}

impl std::fmt::Display for EvalRunOutputItemSample {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match serde_json::to_string(self) {
            Ok(s) => write!(f, "{}", s),
            Err(_) => Err(std::fmt::Error),
        }
    }
}