// openai_struct/models/service_tier.rs
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */
10
/// ServiceTier : Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized.
12
13#[allow(unused_imports)]
14use serde_json::Value;
15
16#[derive(Debug, Serialize, Deserialize)]
17#[serde(rename_all = "lowercase")]
18pub enum ServiceTier {
19 Auto,
20 Default,
21 Flex,
22}
23
24impl Default for ServiceTier {
25 fn default() -> ServiceTier {
26 ServiceTier::Default
27 }
28}
29
/// Verifies that every `ServiceTier` variant serializes to its lowercase
/// name (the `rename_all = "lowercase"` contract) and that the JSON form
/// round-trips back through deserialization.
#[test]
fn test_tier() {
    // (variant, expected JSON string) pairs — one per variant so a newly
    // added variant without a matching case is caught at review time.
    let cases = [
        (ServiceTier::Auto, r#""auto""#),
        (ServiceTier::Default, r#""default""#),
        (ServiceTier::Flex, r#""flex""#),
    ];
    for (tier, expected) in cases.iter() {
        // Serialization must produce exactly the lowercase variant name.
        assert_eq!(serde_json::to_value(tier).unwrap().to_string(), *expected);

        // Round-trip: the JSON form must parse back into the enum. Compare
        // via re-serialization so no PartialEq derive is required.
        let parsed: ServiceTier = serde_json::from_str(expected).unwrap();
        assert_eq!(serde_json::to_value(parsed).unwrap().to_string(), *expected);
    }
}
36}