// objectiveai_api/chat/completions/upstream/openrouter/request/provider.rs
//! Provider preferences for OpenRouter requests.

use serde::{Deserialize, Serialize};

/// Provider preferences merged from request and Ensemble LLM configuration.
///
/// Routing-related fields (`allow_fallbacks`, `require_parameters`, `order`,
/// `only`, `ignore`, `quantizations`) come from the Ensemble LLM, while
/// privacy, pricing, and performance fields (`data_collection`, `zdr`, `sort`,
/// `max_price`, and the throughput/latency bounds) come from the request.
///
/// Every field is optional; `skip_serializing_if` keeps unset fields out of
/// the serialized payload so an absent preference is omitted rather than sent
/// as `null`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Provider {
    /// Whether to allow fallback to other providers. From Ensemble LLM.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allow_fallbacks: Option<bool>,
    /// Whether to require all parameters. From Ensemble LLM.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub require_parameters: Option<bool>,
    /// Data collection preferences. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data_collection: Option<objectiveai::chat::completions::request::ProviderDataCollection>,
    /// Zero Data Retention preference. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub zdr: Option<bool>,
    /// Provider order preference. From Ensemble LLM.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub order: Option<Vec<String>>,
    /// Only use these providers. From Ensemble LLM.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub only: Option<Vec<String>>,
    /// Ignore these providers. From Ensemble LLM.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ignore: Option<Vec<String>>,
    /// Allowed quantizations. From Ensemble LLM.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quantizations: Option<Vec<objectiveai::ensemble_llm::ProviderQuantization>>,
    /// Provider sort preference. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sort: Option<objectiveai::chat::completions::request::ProviderSort>,
    /// Maximum price constraints. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_price: Option<objectiveai::chat::completions::request::ProviderMaxPrice>,
    /// Preferred (soft) minimum throughput. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub preferred_min_throughput: Option<f64>,
    /// Preferred (soft) maximum latency. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub preferred_max_latency: Option<f64>,
    /// Hard minimum throughput requirement. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_throughput: Option<f64>,
    /// Hard maximum latency requirement. From request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_latency: Option<f64>,
}
54
55impl Provider {
56    /// Returns true if all fields are None.
57    pub fn is_empty(&self) -> bool {
58        self.allow_fallbacks.is_none()
59            && self.require_parameters.is_none()
60            && self.data_collection.is_none()
61            && self.zdr.is_none()
62            && self.order.is_none()
63            && self.only.is_none()
64            && self.ignore.is_none()
65            && self.quantizations.is_none()
66            && self.sort.is_none()
67            && self.max_price.is_none()
68            && self.preferred_min_throughput.is_none()
69            && self.preferred_max_latency.is_none()
70            && self.min_throughput.is_none()
71            && self.max_latency.is_none()
72    }
73
74    /// Creates a new Provider by merging request and Ensemble LLM preferences.
75    ///
76    /// Returns None if both inputs are None or if the merged result is empty.
77    pub fn new(
78        request: Option<objectiveai::chat::completions::request::Provider>,
79        ensemble_llm: Option<&objectiveai::ensemble_llm::Provider>,
80    ) -> Option<Self> {
81        let provider = match (request, ensemble_llm) {
82            (
83                Some(objectiveai::chat::completions::request::Provider {
84                    data_collection,
85                    zdr,
86                    sort,
87                    max_price,
88                    preferred_min_throughput,
89                    preferred_max_latency,
90                    min_throughput,
91                    max_latency,
92                }),
93                Some(objectiveai::ensemble_llm::Provider {
94                    allow_fallbacks,
95                    require_parameters,
96                    order,
97                    only,
98                    ignore,
99                    quantizations,
100                }),
101            ) => Self {
102                allow_fallbacks: *allow_fallbacks,
103                require_parameters: *require_parameters,
104                data_collection,
105                zdr,
106                order: order.clone(),
107                only: only.clone(),
108                ignore: ignore.clone(),
109                quantizations: quantizations.clone(),
110                sort,
111                max_price,
112                preferred_min_throughput,
113                preferred_max_latency,
114                min_throughput,
115                max_latency,
116            },
117            (
118                Some(objectiveai::chat::completions::request::Provider {
119                    data_collection,
120                    zdr,
121                    sort,
122                    max_price,
123                    preferred_min_throughput,
124                    preferred_max_latency,
125                    min_throughput,
126                    max_latency,
127                }),
128                None,
129            ) => Self {
130                allow_fallbacks: None,
131                require_parameters: None,
132                data_collection,
133                zdr,
134                order: None,
135                only: None,
136                ignore: None,
137                quantizations: None,
138                sort,
139                max_price,
140                preferred_min_throughput,
141                preferred_max_latency,
142                min_throughput,
143                max_latency,
144            },
145            (
146                None,
147                Some(objectiveai::ensemble_llm::Provider {
148                    allow_fallbacks,
149                    require_parameters,
150                    order,
151                    only,
152                    ignore,
153                    quantizations,
154                }),
155            ) => Self {
156                allow_fallbacks: *allow_fallbacks,
157                require_parameters: *require_parameters,
158                data_collection: None,
159                zdr: None,
160                order: order.clone(),
161                only: only.clone(),
162                ignore: ignore.clone(),
163                quantizations: quantizations.clone(),
164                sort: None,
165                max_price: None,
166                preferred_min_throughput: None,
167                preferred_max_latency: None,
168                min_throughput: None,
169                max_latency: None,
170            },
171            (None, None) => return None,
172        };
173        if provider.is_empty() {
174            None
175        } else {
176            Some(provider)
177        }
178    }
179}