//! `async_llm/types/completion_choice.rs` — choice types for completion API responses.

use serde::{Deserialize, Serialize};
2
3#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
4pub struct CompletionChoice {
5    /// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, or content_filter if content was omitted due to a flag from our content filters.
6    pub finish_reason: Option<String>,
7    pub text: Option<String>,
8    pub index: Option<u32>,
9    pub logprobs: Option<CompletionLogprobs>,
10}
11
12#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
13pub struct CompletionLogprobs {
14    pub tokens: Vec<String>,
15    pub token_logprobs: Vec<Option<f32>>,
16    pub top_logprobs: Vec<serde_json::Value>,
17    pub text_offset: Vec<u32>,
18}