---
source: src/test_helpers.rs
expression: "&generated_code"
---
//! Generated types from OpenAPI specification
//!
//! This file contains all the generated types for the API.
//! Do not edit manually - regenerate using the appropriate script.
#![allow(clippy::large_enum_variant)]
#![allow(clippy::format_in_format_args)]
#![allow(clippy::let_unit_value)]
#![allow(unreachable_patterns)]
use serde::{Deserialize, Serialize};
///Represents a streamed chunk of a chat completion response
///
///One value of this type corresponds to a single `chat.completion.chunk`
///object (see the `object` discriminant field below).
///NOTE(review): generated from an OpenAPI spec — field set is assumed to
///mirror that spec; regenerate rather than hand-edit.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CreateChatCompletionStreamResponse {
///A list of chat completion choices
pub choices: Vec<CreateChatCompletionStreamResponseChoicesItem>,
///The Unix timestamp (in seconds) of when the chat completion was created
pub created: i64,
///A unique identifier for the chat completion
pub id: String,
///The model used to generate the completion
pub model: String,
///The object type, which is always `chat.completion.chunk`
pub object: CreateChatCompletionStreamResponseObject,
}
///The object type, which is always `chat.completion.chunk`
///
///Single-variant enum acting as a serde-checked discriminant:
///deserialization fails for any string other than
///`"chat.completion.chunk"`, and `Default` yields that sole variant.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Default)]
pub enum CreateChatCompletionStreamResponseObject {
#[default]
#[serde(rename = "chat.completion.chunk")]
ChatCompletionChunk,
}
///One choice within a streamed chat completion chunk
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CreateChatCompletionStreamResponseChoicesItem {
///The incremental update (content and/or role) for this choice
pub delta: ChatCompletionStreamResponseDelta,
///The reason the model stopped generating tokens
///(field is omitted from serialized JSON while `None`)
#[serde(skip_serializing_if = "Option::is_none")]
pub finish_reason: Option<CreateChatCompletionStreamResponseFinishReason>,
///The index of the choice in the list of choices
pub index: i64,
///Log probability information for the choice
#[serde(skip_serializing_if = "Option::is_none")]
pub logprobs: Option<CreateChatCompletionStreamResponseLogprobs>,
}
///Log probability information for the choice
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CreateChatCompletionStreamResponseLogprobs {
///Per-token log probabilities for the message content
///NOTE(review): presumably populated only when logprobs were requested —
///confirm against the source spec
#[serde(skip_serializing_if = "Option::is_none")]
pub content: Option<Vec<ChatCompletionTokenLogprob>>,
///Per-token log probabilities for a refusal message, if any
#[serde(skip_serializing_if = "Option::is_none")]
pub refusal: Option<Vec<ChatCompletionTokenLogprob>>,
}
///The reason the model stopped generating tokens
///
///The `#[serde(rename)]` strings are the exact wire values; `Default`
///is `Stop`.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Default)]
pub enum CreateChatCompletionStreamResponseFinishReason {
///Natural stop point or a provided stop sequence was reached
#[default]
#[serde(rename = "stop")]
Stop,
///The maximum token limit was reached
#[serde(rename = "length")]
Length,
///The model called one or more tools
#[serde(rename = "tool_calls")]
ToolCalls,
///Content was omitted due to a content filter
#[serde(rename = "content_filter")]
ContentFilter,
///The model called a function (legacy function-calling API)
#[serde(rename = "function_call")]
FunctionCall,
}
///Log probability information for a single token
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionTokenLogprob {
///Byte values backing the token
///NOTE(review): presumably the UTF-8 bytes of `token`, `None` when not
///applicable — confirm against the source spec
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes: Option<Vec<i64>>,
///The log probability of this token
pub logprob: f64,
///The token text
pub token: String,
}
///An incremental (delta) update to a streamed chat message
///
///Both fields are optional and omitted from serialized JSON when `None`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionStreamResponseDelta {
///New content fragment for the message, if any
#[serde(skip_serializing_if = "Option::is_none")]
pub content: Option<String>,
///The role of the message author
///NOTE(review): typically only present on the first chunk of a stream —
///confirm against the source spec
#[serde(skip_serializing_if = "Option::is_none")]
pub role: Option<ChatCompletionStreamResponseDeltaRole>,
}
///The role of the author of a streamed chat message delta
///
///The `#[serde(rename)]` strings are the exact wire values; `Default`
///is `Developer`.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Default)]
pub enum ChatCompletionStreamResponseDeltaRole {
#[default]
#[serde(rename = "developer")]
Developer,
#[serde(rename = "system")]
System,
#[serde(rename = "user")]
User,
#[serde(rename = "assistant")]
Assistant,
#[serde(rename = "tool")]
Tool,
}