tritonserver_rs/
options.rs

use std::{
    ffi::CString, os::unix::prelude::OsStrExt, path::Path, ptr::null_mut, sync::Arc, time::Duration,
};

use crate::{
    error::{Error, ErrorCode},
    path_to_cstring, sys, to_cstring,
};

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
/// Triton server model control modes.
pub enum Control {
    /// The models in the model repository will be loaded on startup. \
    /// After startup any changes to the model repository will be ignored. \
    /// Calling Server::poll_model_repository will result in an error.
    None = sys::tritonserver_modelcontrolmode_enum_TRITONSERVER_MODEL_CONTROL_NONE,
    /// The models in the model repository will be loaded on startup. \
    /// The model repository can be polled periodically using Server::poll_model_repository and the server will load, \
    /// unload, and update models according to changes in the model repository.
    Poll = sys::tritonserver_modelcontrolmode_enum_TRITONSERVER_MODEL_CONTROL_POLL,
    /// The models in the model repository will not be loaded on startup. \
    /// The corresponding model control APIs must be called to load/unload a model in the model repository.
    Explicit = sys::tritonserver_modelcontrolmode_enum_TRITONSERVER_MODEL_CONTROL_EXPLICIT,
}

/// Triton server rate limit modes.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum Limit {
    /// The rate limiting is turned off and the inference gets executed whenever an instance is available.
    Off = sys::tritonserver_ratelimitmode_enum_TRITONSERVER_RATE_LIMIT_OFF,
    /// The rate limiting prioritizes the inference execution using the number of times each instance has got a chance to run. \
    /// The execution gets to run only when its resource constraints are satisfied.
    ExecCount = sys::tritonserver_ratelimitmode_enum_TRITONSERVER_RATE_LIMIT_EXEC_COUNT,
}

/// Logging formats.
///
/// The TRITONSERVER API offers two logging formats.
/// The formats have a common set of fields but differ in
/// how the timestamp for a log entry is represented.
/// Messages are serialized according to JSON encoding rules by default.
/// This behavior can be disabled by setting the environment variable TRITON_SERVER_ESCAPE_LOG_MESSAGES to "0".
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum LogFormat {
    /// `<level><month><day><hour>:<min>:<sec>.<usec> <pid> <file>:<line>] <msg>`
    ///
    /// Example: \
    /// I0520 20:03:25.829575 3355 model_lifecycle.cc:441] "AsyncLoad() 'simple'"
    Default = sys::TRITONSERVER_logformat_enum_TRITONSERVER_LOG_DEFAULT,
    /// `<year>-<month>-<day>T<hour>:<min>:<sec>Z <level> <pid> <file>:<line>] <msg>`
    ///
    /// Example: \
    /// 2024-05-20T20:03:26Z I 3415 model_lifecycle.cc:441] "AsyncLoad() 'simple'"
    Iso8601 = sys::TRITONSERVER_logformat_enum_TRITONSERVER_LOG_ISO8601,
}

/// Kinds of instance groups recognized by TRITONSERVER.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum InstanceGroupKind {
    /// This instance group represents instances that can run on either
    /// CPU or GPU. If all GPUs listed in 'gpus' are available then
    /// instances will be created on GPU(s), otherwise instances will
    /// be created on CPU.
    Auto = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_AUTO,

    /// This instance group represents instances that must run on the CPU.
    Cpu = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_CPU,

    /// This instance group represents instances that must run on the GPU.
    Gpu = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_GPU,

    /// This instance group represents instances that should run on the
    /// CPU and/or GPU(s) as specified by the model or backend itself.
    /// The inference server will not override the model/backend settings.
    Model = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_MODEL,
}

/// Triton server creation options.
#[derive(Debug, Clone)]
pub struct Options(pub(crate) Arc<*mut sys::TRITONSERVER_ServerOptions>);

unsafe impl Send for Options {}
unsafe impl Sync for Options {}

impl Options {
    /// Create a new server options object. \
    /// The path must be the full absolute path to the model repository. \
    /// This function can be called multiple times with different paths to set multiple model repositories. \
    /// Note that if a model is not unique across all model repositories at any time, the model will not be available.
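    ///
    /// A minimal sketch (the repository path is illustrative):
    /// ```
    /// let mut options = Options::new("/models")?;
    /// options.log_info(true)?.exit_on_error(true)?;
    /// ```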
    pub fn new<P: AsRef<Path>>(repository: P) -> Result<Self, Error> {
        let path = path_to_cstring(repository)?;
        let mut this = null_mut::<sys::TRITONSERVER_ServerOptions>();

        triton_call!(sys::TRITONSERVER_ServerOptionsNew(&mut this as *mut _))?;

        assert!(!this.is_null());
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelRepositoryPath(
                this,
                path.as_ptr() as *const _,
            ),
            Self(Arc::new(this))
        )
    }

    /// Set the textual ID for the server in a server options. The ID is a name that identifies the server.
    pub fn server_id<I: AsRef<str>>(&mut self, id: I) -> Result<&mut Self, Error> {
        let id = to_cstring(id)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetServerId(*self.0, id.as_ptr()),
            self
        )
    }

    /// Set a model to be loaded at startup in a server options. \
    /// The model must be present in one, and only one, of the specified model repositories. \
    /// This function can be called multiple times with different model names to set multiple startup models. \
    /// Note that it only takes effect with [Control::Explicit] set.
    pub fn startup_model<S: AsRef<str>>(&mut self, model: S) -> Result<&mut Self, Error> {
        let model = to_cstring(model)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetStartupModel(*self.0, model.as_ptr()),
            self
        )
    }

    /// Set the model control mode in a server options. For each mode the models will be managed as follows:
    ///
    /// [Control::None]: the models in the model repository will be loaded on startup.
    /// After startup any changes to the model repository will be ignored. Calling [poll_model_repository](crate::Server::poll_model_repository) will result in an error.
    ///
    /// [Control::Poll]: the models in the model repository will be loaded on startup.
    /// The model repository can be polled periodically using [poll_model_repository](crate::Server::poll_model_repository) and the server will load,
    /// unload, and update models according to changes in the model repository.
    ///
    /// [Control::Explicit]: the models in the model repository will not be loaded on startup.
    /// The corresponding model control APIs must be called to load/unload a model in the model repository.
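    ///
    /// For example, to manage models explicitly (the "simple" model name is illustrative):
    /// ```
    /// options
    ///     .model_control_mode(Control::Explicit)?
    ///     .startup_model("simple")?;
    /// ```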
    pub fn model_control_mode(&mut self, mode: Control) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelControlMode(*self.0, mode as _),
            self
        )
    }

    /// Enable or disable strict model configuration handling in a server options.
    pub fn strict_model_config(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetStrictModelConfig(*self.0, enable),
            self
        )
    }

    /// Set the custom model configuration name to load for all models.
    /// Fall back to the default config file if empty.
    ///
    /// `config_name` The name of the config file to load for all models.
    pub fn model_config_name<C: AsRef<str>>(&mut self, config_name: C) -> Result<&mut Self, Error> {
        let name = to_cstring(config_name)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelConfigName(*self.0, name.as_ptr()),
            self
        )
    }

    /// Set the rate limit mode.
    ///
    /// [Limit::ExecCount]: The rate limiting prioritizes the inference execution using the number of times each instance has got a chance to run.
    /// The execution gets to run only when its resource constraints are satisfied.
    ///
    /// [Limit::Off]: The rate limiting is turned off and the inference gets executed whenever an instance is available.
    ///
    /// By default, execution count is used to determine the priorities.
    pub fn rate_limiter_mode(&mut self, mode: Limit) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetRateLimiterMode(*self.0, mode as _),
            self
        )
    }

    /// Add a resource count for rate limiting. \
    /// `name`: The name of the resource. \
    /// `count`: The count of the resource. \
    /// `device`: The device identifier for the resource.
    /// A value of -1 indicates that the specified number of resources are available on every device.
    ///
    /// The device value is ignored for a global resource. \
    /// The server will use the rate limiter configuration specified for instance groups in the model config to determine whether a resource is global. \
    /// In case of conflicting resource types in different model configurations, the server will raise an appropriate error while loading the model.
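    ///
    /// A sketch combining the mode and a resource (the resource name, count, and device are illustrative):
    /// ```
    /// options
    ///     .rate_limiter_mode(Limit::ExecCount)?
    ///     .add_rate_limiter_resource("R1", 4, -1)?;
    /// ```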
    pub fn add_rate_limiter_resource<N: AsRef<str>>(
        &mut self,
        name: N,
        count: u64,
        device: i32,
    ) -> Result<&mut Self, Error> {
        let name = to_cstring(name)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsAddRateLimiterResource(
                *self.0,
                name.as_ptr(),
                count as usize,
                device,
            ),
            self
        )
    }

    /// Set the total pinned memory byte size that the server can allocate. \
    /// The pinned memory pool will be shared across Triton itself and the backends that use MemoryManager to allocate memory. \
    /// `size`: The pinned memory pool byte size.
    pub fn pinned_memory_pool_byte_size(&mut self, size: u64) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetPinnedMemoryPoolByteSize(*self.0, size),
            self
        )
    }

    /// Set the total CUDA memory byte size that the server can allocate on a given GPU device. \
    /// The CUDA memory pool will be shared across Triton itself and the backends that use MemoryManager to allocate memory. \
    /// `device`: The GPU device to allocate the memory pool on. \
    /// `size`: The CUDA memory pool byte size.
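    ///
    /// A sketch reserving a 256 MiB pinned pool and a 512 MiB CUDA pool on GPU 0 (the sizes are illustrative):
    /// ```
    /// options
    ///     .pinned_memory_pool_byte_size(256 * 1024 * 1024)?
    ///     .cuda_memory_pool_byte_size(0, 512 * 1024 * 1024)?;
    /// ```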
    pub fn cuda_memory_pool_byte_size(
        &mut self,
        device: i32,
        size: u64,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCudaMemoryPoolByteSize(*self.0, device, size),
            self
        )
    }

    /// Set the size of the virtual address space that will be used
    /// for growable memory in implicit state.
    ///
    /// `device` The GPU device to set the CUDA virtual address space size for. \
    /// `size` The size of the CUDA virtual address space.
    pub fn cuda_virtual_address_size(
        &mut self,
        device: i32,
        size: usize,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCudaVirtualAddressSize(*self.0, device, size),
            self
        )
    }

    /// Deprecated. See [Options::response_cache_config] instead. \
    /// Set the total response cache byte size that the server can allocate in CPU memory. \
    /// The response cache will be shared across all inference requests and across all models. \
    /// `size`: The total response cache byte size.
    #[deprecated]
    pub fn response_cache_byte_size(&mut self, size: u64) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetResponseCacheByteSize(*self.0, size),
            self
        )
    }

    /// Set the directory containing cache shared libraries.
    /// This directory is searched when looking for cache implementations.
    ///
    /// `cache_dir` The full path of the cache directory.
    pub fn response_cache_directory<P: AsRef<Path>>(
        &mut self,
        cache_dir: P,
    ) -> Result<&mut Self, Error> {
        let cache_dir = path_to_cstring(cache_dir)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCacheDirectory(*self.0, cache_dir.as_ptr()),
            self
        )
    }

    /// Set the cache config that will be used to initialize the cache
    /// implementation for `cache_name`.
    ///
    /// It is expected that the `cache_name` provided matches a directory inside
    /// the `cache_dir` used for [Options::response_cache_directory].
    /// The default `cache_dir` is "/opt/tritonserver/caches", so for a `cache_name` of
    /// "local", Triton would expect to find the "local" cache implementation at
    /// "/opt/tritonserver/caches/local/libtritoncache_local.so".
    ///
    /// Altogether an example for the "local" cache implementation would look like:
    /// ```
    /// let cache_name = "local";
    /// let config_json = "{\"size\": 1048576}";
    /// options.response_cache_config(cache_name, config_json)?;
    /// ```
    ///
    /// `cache_name` The name of the cache. Example names would be
    /// "local", "redis", or the name of a custom cache implementation. \
    /// `config_json` The string representation of the config JSON that is
    /// used to initialize the cache implementation.
    pub fn response_cache_config<N: AsRef<str>, J: AsRef<str>>(
        &mut self,
        cache_name: N,
        config_json: J,
    ) -> Result<&mut Self, Error> {
        let name = to_cstring(cache_name)?;
        let config_json = to_cstring(config_json)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCacheConfig(
                *self.0,
                name.as_ptr(),
                config_json.as_ptr()
            ),
            self
        )
    }

    /// Set the minimum supported CUDA compute capability. \
    /// `capability`: The minimum CUDA compute capability.
    pub fn min_supported_compute_capability(
        &mut self,
        capability: f64,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMinSupportedComputeCapability(*self.0, capability),
            self
        )
    }

    /// Enable or disable exit-on-error. True to enable exiting on initialization error, false to continue.
    pub fn exit_on_error(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetExitOnError(*self.0, enable),
            self
        )
    }

    /// Enable or disable strict readiness handling.
    pub fn strict_readiness(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetStrictReadiness(*self.0, enable),
            self
        )
    }

    /// Set the exit timeout. The timeout is passed to Triton in whole seconds and is clamped to at least one second.
    pub fn exit_timeout(&mut self, timeout: Duration) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetExitTimeout(*self.0, timeout.as_secs().max(1) as _),
            self
        )
    }

    /// Set the number of threads used in the buffer manager.
    pub fn buffer_manager_thread_count(&mut self, thread: usize) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetBufferManagerThreadCount(*self.0, thread as _),
            self
        )
    }

    /// Set the number of threads used to concurrently load models in a server options.
    ///
    /// `thread_count` The number of threads.
    pub fn model_load_thread_count(&mut self, thread_count: usize) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelLoadThreadCount(*self.0, thread_count as _),
            self
        )
    }

    /// Set the number of retries when loading a model in a server options.
    ///
    /// `retry_count` The number of retries.
    pub fn model_retry_count(&mut self, retry_count: usize) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelLoadRetryCount(*self.0, retry_count as _),
            self
        )
    }

    /// Enable peer access to allow a GPU device to directly access the memory of another GPU device.
    /// Note that even when this option is set to true, Triton will only try to enable peer access
    /// and might fail to enable it if the underlying system doesn't support peer access.
    ///
    /// `enable_peer_access` Whether to enable peer access or not.
    pub fn peer_access(&mut self, enable_peer_access: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetEnablePeerAccess(*self.0, enable_peer_access),
            self
        )
    }

    /// Enable model namespacing to allow serving models with the same name if they are in different namespaces.
    ///
    /// `enable_namespace` Whether to enable model namespacing or not.
    pub fn model_namespacing(&mut self, enable_namespace: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelNamespacing(*self.0, enable_namespace),
            self
        )
    }

    /// Provide a log output file.
    ///
    /// `log_file` a string defining the file where the log outputs will be saved.
    /// An empty string for the file name will cause Triton to direct logging facilities to the console.
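    ///
    /// A sketch directing logs to a file (the path and settings are illustrative):
    /// ```
    /// options
    ///     .log_file("/tmp/triton.log")?
    ///     .log_format(LogFormat::Iso8601)?
    ///     .log_verbose(1)?;
    /// ```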
    pub fn log_file<P: AsRef<str>>(&mut self, log_file: P) -> Result<&mut Self, Error> {
        let log_file = to_cstring(log_file)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogFile(*self.0, log_file.as_ptr()),
            self
        )
    }

    /// Enable or disable info level logging.
    pub fn log_info(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogInfo(*self.0, enable),
            self
        )
    }

    /// Enable or disable warning level logging.
    pub fn log_warn(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogWarn(*self.0, enable),
            self
        )
    }

    /// Enable or disable error level logging.
    pub fn log_error(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogError(*self.0, enable),
            self
        )
    }

    /// Set the format of log messages. See [LogFormat] for the available formats.
    pub fn log_format(&mut self, log_format: LogFormat) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogFormat(*self.0, log_format as _),
            self
        )
    }

    /// Set the verbose logging level. Level zero disables verbose logging.
    pub fn log_verbose(&mut self, level: i32) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogVerbose(*self.0, level),
            self
        )
    }

    /// Enable or disable metrics collection in a server options.
    pub fn metrics(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMetrics(*self.0, enable),
            self
        )
    }

    /// Enable or disable GPU metrics collection in a server options.
    /// GPU metrics are collected only if both this option and [Options::metrics] are enabled.
    pub fn gpu_metrics(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetGpuMetrics(*self.0, enable),
            self
        )
    }

    /// Enable or disable CPU metrics collection in a server options.
    /// CPU metrics are collected only if both this option and [Options::metrics] are enabled.
    pub fn cpu_metrics(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCpuMetrics(*self.0, enable),
            self
        )
    }

    /// Set the interval for metrics collection in a server options.
    /// This is 2000 milliseconds by default. The interval is passed to Triton in whole milliseconds and is clamped to at least one millisecond.
    pub fn metrics_interval(&mut self, interval: Duration) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMetricsInterval(
                *self.0,
                interval.as_millis().max(1) as _,
            ),
            self
        )
    }

    /// Set the directory containing backend shared libraries. \
    /// This directory is searched last after the version and model directory
    /// in the model repository when looking for the backend shared library for a model. \
    /// If the backend is named 'be' the directory searched is 'backend_dir'/be/libtriton_be.so.
    pub fn backend_directory<P: AsRef<Path>>(&mut self, path: P) -> Result<&mut Self, Error> {
        let path = path
            .as_ref()
            .canonicalize()
            .map_err(|err| Error::new(ErrorCode::InvalidArg, err.to_string()))
            .and_then(|path| {
                CString::new(path.as_os_str().as_bytes())
                    .map_err(|err| Error::new(ErrorCode::InvalidArg, err.to_string()))
            })?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetBackendDirectory(*self.0, path.as_ptr()),
            self
        )
    }

    /// Set the directory containing repository agent shared libraries. \
    /// This directory is searched when looking for the repository agent shared library for a model. \
    /// If the agent is named 'ra' the directory searched is 'repoagent_dir'/ra/libtritonrepoagent_ra.so.
    pub fn repo_agent_directory<P: AsRef<Path>>(&mut self, path: P) -> Result<&mut Self, Error> {
        let path = CString::new(path.as_ref().as_os_str().as_bytes())
            .map_err(|err| Error::new(ErrorCode::InvalidArg, err.to_string()))?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetRepoAgentDirectory(*self.0, path.as_ptr()),
            self
        )
    }

    /// Specify the limit on memory usage as a fraction on the device
    /// identified by 'kind' and 'device_id'. If model loading on the device
    /// is requested and the current memory usage exceeds the limit, the load will be rejected.
    /// If not specified, the limit will not be set.
    ///
    /// Currently supports [InstanceGroupKind::Gpu] only.
    ///
    /// `kind` The kind of the device. \
    /// `device` The id of the device. \
    /// `fraction` The limit on memory usage as a fraction.
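    ///
    /// For example, to reject loads once GPU 0 exceeds 80% memory usage (a sketch):
    /// ```
    /// options.model_load_device_limit(InstanceGroupKind::Gpu, 0, 0.8)?;
    /// ```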
    pub fn model_load_device_limit(
        &mut self,
        kind: InstanceGroupKind,
        device: i32,
        fraction: f64,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelLoadDeviceLimit(
                *self.0, kind as _, device, fraction
            ),
            self
        )
    }

    /// Set a configuration setting for a named backend in a server options. \
    /// `name`: The name of the backend. \
    /// `setting`: The name of the setting. \
    /// `value`: The setting value.
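    ///
    /// For example, selecting the TensorFlow version (mirrors Triton's `--backend-config=tensorflow,version=2` CLI flag):
    /// ```
    /// options.backend_config("tensorflow", "version", "2")?;
    /// ```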
    pub fn backend_config<N, S, V>(
        &mut self,
        name: N,
        setting: S,
        value: V,
    ) -> Result<&mut Self, Error>
    where
        N: AsRef<str>,
        S: AsRef<str>,
        V: AsRef<str>,
    {
        let name = to_cstring(name)?;
        let setting = to_cstring(setting)?;
        let value = to_cstring(value)?;

        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetBackendConfig(
                *self.0,
                name.as_ptr(),
                setting.as_ptr(),
                value.as_ptr(),
            ),
            self
        )
    }

    /// Set a host policy setting for a given policy name in a server options. \
    /// `name`: The name of the policy. \
    /// `setting`: The name of the setting. \
    /// `value`: The setting value.
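    ///
    /// For example, pinning a policy to NUMA node 0 (mirrors a `--host-policy=gpu_0,numa-node=0` CLI flag; the policy name is illustrative):
    /// ```
    /// options.host_policy("gpu_0", "numa-node", "0")?;
    /// ```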
    pub fn host_policy<N, S, V>(
        &mut self,
        name: N,
        setting: S,
        value: V,
    ) -> Result<&mut Self, Error>
    where
        N: AsRef<str>,
        S: AsRef<str>,
        V: AsRef<str>,
    {
        let name = to_cstring(name)?;
        let setting = to_cstring(setting)?;
        let value = to_cstring(value)?;

        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetHostPolicy(
                *self.0,
                name.as_ptr(),
                setting.as_ptr(),
                value.as_ptr(),
            ),
            self
        )
    }

    /// Set a configuration setting for metrics in server options.
    ///
    /// `name` The name of the configuration group. An empty string indicates a global configuration option. \
    /// `setting` The name of the setting. \
    /// `value` The setting value.
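    ///
    /// A sketch enabling summary latency metrics globally (the setting name follows Triton's `--metrics-config summary_latencies=true` flag; treat the exact name/setting split as an assumption):
    /// ```
    /// options.metrics_config("", "summary_latencies", "true")?;
    /// ```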
    pub fn metrics_config<N, S, V>(
        &mut self,
        name: N,
        setting: S,
        value: V,
    ) -> Result<&mut Self, Error>
    where
        N: AsRef<str>,
        S: AsRef<str>,
        V: AsRef<str>,
    {
        let name = to_cstring(name)?;
        let setting = to_cstring(setting)?;
        let value = to_cstring(value)?;

        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMetricsConfig(
                *self.0,
                name.as_ptr(),
                setting.as_ptr(),
                value.as_ptr()
            ),
            self
        )
    }
}

impl Drop for Options {
    fn drop(&mut self) {
        // Options is Clone, so only the last Arc reference releases the
        // underlying TRITONSERVER_ServerOptions handle.
        if !self.0.is_null() && Arc::strong_count(&self.0) == 1 {
            unsafe { sys::TRITONSERVER_ServerOptionsDelete(*self.0) };
        }
    }
}