tritonserver_rs/options.rs

use std::{ffi::CString, os::unix::prelude::OsStrExt, path::Path, ptr::null_mut, time::Duration};

use crate::{
    error::{Error, ErrorCode},
    path_to_cstring, sys, to_cstring,
};

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
/// Triton server model control modes.
pub enum Control {
    /// The models in the model repository will be loaded on startup. \
    /// After startup any changes to the model repository will be ignored. \
    /// Calling Server::poll_model_repository will result in an error.
    None = sys::tritonserver_modelcontrolmode_enum_TRITONSERVER_MODEL_CONTROL_NONE,
    /// The models in the model repository will be loaded on startup. \
    /// The model repository can be polled periodically using Server::poll_model_repository and the server will load, \
    /// unload, and update models according to changes in the model repository.
    Poll = sys::tritonserver_modelcontrolmode_enum_TRITONSERVER_MODEL_CONTROL_POLL,
    /// The models in the model repository will not be loaded on startup. \
    /// The corresponding model control APIs must be called to load / unload a model in the model repository.
    Explicit = sys::tritonserver_modelcontrolmode_enum_TRITONSERVER_MODEL_CONTROL_EXPLICIT,
}

/// Triton server rate limit modes.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum Limit {
    /// The rate limiting is turned off and the inference gets executed whenever an instance is available.
    Off = sys::tritonserver_ratelimitmode_enum_TRITONSERVER_RATE_LIMIT_OFF,
    /// The rate limiting prioritizes the inference execution using the number of times each instance has got a chance to run. \
    /// The execution gets to run only when its resource constraints are satisfied.
    ExecCount = sys::tritonserver_ratelimitmode_enum_TRITONSERVER_RATE_LIMIT_EXEC_COUNT,
}

/// Logging formats.
///
/// The TRITONSERVER API offers two logging formats.
/// The formats have a common set of fields but differ in
/// how the timestamp for a log entry is represented.
/// Messages are serialized according to JSON encoding rules by default.
/// This behavior can be disabled by setting the environment variable TRITON_SERVER_ESCAPE_LOG_MESSAGES to "0".
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum LogFormat {
    /// `<level><month><day><hour>:<min>:<sec>.<usec> <pid> <file>:<line>] <msg>`
    ///
    /// Example: \
    /// I0520 20:03:25.829575 3355 model_lifecycle.cc:441] "AsyncLoad() 'simple'"
    Default = sys::TRITONSERVER_logformat_enum_TRITONSERVER_LOG_DEFAULT,
    /// `<year>-<month>-<day>T<hour>:<min>:<sec>Z <level> <pid> <file>:<line>] <msg>`
    ///
    /// Example: \
    /// 2024-05-20T20:03:26Z I 3415 model_lifecycle.cc:441] "AsyncLoad() 'simple'"
    Iso8601 = sys::TRITONSERVER_logformat_enum_TRITONSERVER_LOG_ISO8601,
}

/// Kinds of instance groups recognized by TRITONSERVER.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(u32)]
pub enum InstanceGroupKind {
    /// This instance group represents instances that can run on either
    /// CPU or GPU. If all GPUs listed in 'gpus' are available then
    /// instances will be created on GPU(s), otherwise instances will
    /// be created on CPU.
    Auto = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_AUTO,

    /// This instance group represents instances that must run on the CPU.
    Cpu = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_CPU,

    /// This instance group represents instances that must run on the GPU.
    Gpu = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_GPU,

    /// This instance group represents instances that should run on the
    /// CPU and/or GPU(s) as specified by the model or backend itself.
    /// The inference server will not override the model/backend settings.
    Model = sys::TRITONSERVER_instancegroupkind_enum_TRITONSERVER_INSTANCEGROUPKIND_MODEL,
}

/// Triton server creation options.
#[derive(Debug)]
pub struct Options(pub(crate) *mut sys::TRITONSERVER_ServerOptions);

impl Options {
    /// Create a new server options object. \
    /// The path must be the full absolute path to the model repository. \
    /// This function can be called multiple times with different paths to set multiple model repositories. \
    /// Note that if a model is not unique across all model repositories at any time, the model will not be available.
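    ///
    /// A minimal sketch of typical usage (the repository path and server ID are placeholders):
    /// ```ignore
    /// let mut options = Options::new("/models")?;
    /// options.server_id("example-server")?.exit_on_error(true)?;
    /// ```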
    pub fn new<P: AsRef<Path>>(repository: P) -> Result<Self, Error> {
        let path = path_to_cstring(repository)?;
        let mut this = null_mut::<sys::TRITONSERVER_ServerOptions>();

        triton_call!(sys::TRITONSERVER_ServerOptionsNew(&mut this as *mut _))?;

        assert!(!this.is_null());
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelRepositoryPath(this, path.as_ptr()),
            Self(this)
        )
    }

    /// Set the textual ID for the server in a server options. The ID is a name that identifies the server.
    pub fn server_id<I: AsRef<str>>(&mut self, id: I) -> Result<&mut Self, Error> {
        let id = to_cstring(id)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetServerId(self.0, id.as_ptr()),
            self
        )
    }

    /// Set the model to be loaded at startup in a server options. \
    /// The model must be present in one, and only one, of the specified model repositories. \
    /// This function can be called multiple times with different model names to set multiple startup models. \
    /// Note that it only takes effect when [Control::Explicit] is set.
    pub fn startup_model<S: AsRef<str>>(&mut self, model: S) -> Result<&mut Self, Error> {
        let model = to_cstring(model)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetStartupModel(self.0, model.as_ptr()),
            self
        )
    }

    /// Set the model control mode in a server options. For each mode the models will be managed as the following:
    ///
    /// [Control::None]: the models in the model repository will be loaded on startup.
    /// After startup any changes to the model repository will be ignored. Calling [poll_model_repository](crate::Server::poll_model_repository) will result in an error.
    ///
    /// [Control::Poll]: the models in the model repository will be loaded on startup.
    /// The model repository can be polled periodically using [poll_model_repository](crate::Server::poll_model_repository) and the server will load,
    /// unload, and update models according to changes in the model repository.
    ///
    /// [Control::Explicit]: the models in the model repository will not be loaded on startup.
    /// The corresponding model control APIs must be called to load / unload a model in the model repository.
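    ///
    /// For example, a sketch of explicit control combined with a startup model
    /// (the model name is illustrative):
    /// ```ignore
    /// options
    ///     .model_control_mode(Control::Explicit)?
    ///     .startup_model("simple")?;
    /// ```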
    pub fn model_control_mode(&mut self, mode: Control) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelControlMode(self.0, mode as _),
            self
        )
    }

    /// Enable or disable strict model configuration handling in a server options.
    pub fn strict_model_config(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetStrictModelConfig(self.0, enable),
            self
        )
    }

    /// Set the custom model configuration name to load for all models.
    /// Falls back to the default config file if empty.
    ///
    /// `config_name` The name of the config file to load for all models.
    pub fn model_config_name<C: AsRef<str>>(&mut self, config_name: C) -> Result<&mut Self, Error> {
        let name = to_cstring(config_name)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelConfigName(self.0, name.as_ptr()),
            self
        )
    }

    /// Set the rate limit mode.
    ///
    /// [Limit::ExecCount]: The rate limiting prioritizes the inference execution using the number of times each instance has got a chance to run.
    /// The execution gets to run only when its resource constraints are satisfied.
    ///
    /// [Limit::Off]: The rate limiting is turned off and the inference gets executed whenever an instance is available.
    ///
    /// By default, execution count is used to determine the priorities.
    pub fn rate_limiter_mode(&mut self, mode: Limit) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetRateLimiterMode(self.0, mode as _),
            self
        )
    }

    /// Add resource count for rate limiting. \
    /// `name`: The name of the resource. \
    /// `count`: The count of the resource. \
    /// `device`: The device identifier for the resource.
    /// A value of -1 indicates that the specified number of resources are available on every device.
    ///
    /// The device value is ignored for a global resource. \
    /// The server will use the rate limiter configuration specified for instance groups in the model config to determine whether a resource is global. \
    /// In case of conflicting resource types in different model configurations, the server will raise an appropriate error while loading the model.
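    ///
    /// A sketch under assumed values: four units of a resource named "R1" on every
    /// device, combined with execution-count prioritization:
    /// ```ignore
    /// options
    ///     .rate_limiter_mode(Limit::ExecCount)?
    ///     .add_rate_limiter_resource("R1", 4, -1)?;
    /// ```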
    pub fn add_rate_limiter_resource<N: AsRef<str>>(
        &mut self,
        name: N,
        count: u64,
        device: i32,
    ) -> Result<&mut Self, Error> {
        let name = to_cstring(name)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsAddRateLimiterResource(
                self.0,
                name.as_ptr(),
                count as usize,
                device,
            ),
            self
        )
    }

    /// Set the total pinned memory byte size that the server can allocate. \
    /// The pinned memory pool will be shared across Triton itself and the backends that use MemoryManager to allocate memory. \
    /// `size`: The pinned memory pool byte size.
    pub fn pinned_memory_pool_byte_size(&mut self, size: u64) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetPinnedMemoryPoolByteSize(self.0, size),
            self
        )
    }

    /// Set the total CUDA memory byte size that the server can allocate on a given GPU device. \
    /// The CUDA memory pool will be shared across Triton itself and the backends that use MemoryManager to allocate memory. \
    /// `device`: The GPU device to allocate the memory pool on. \
    /// `size`: The CUDA memory pool byte size.
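    ///
    /// As an illustration (the sizes are arbitrary): 256 MiB of pinned memory plus
    /// a 64 MiB CUDA pool on device 0:
    /// ```ignore
    /// options
    ///     .pinned_memory_pool_byte_size(256 * 1024 * 1024)?
    ///     .cuda_memory_pool_byte_size(0, 64 * 1024 * 1024)?;
    /// ```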
    pub fn cuda_memory_pool_byte_size(
        &mut self,
        device: i32,
        size: u64,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCudaMemoryPoolByteSize(self.0, device, size),
            self
        )
    }

    /// Set the size of the virtual address space that will be used
    /// for growable memory in implicit state.
    ///
    /// `device` The GPU device to set the CUDA virtual address space size for. \
    /// `size` The size of the CUDA virtual address space.
    pub fn cuda_virtual_address_size(
        &mut self,
        device: i32,
        size: usize,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCudaVirtualAddressSize(self.0, device, size),
            self
        )
    }

    /// Deprecated. See [Options::response_cache_config] instead. \
    /// Set the total response cache byte size that the server can allocate in CPU memory. \
    /// The response cache will be shared across all inference requests and across all models. \
    /// `size`: The total response cache byte size.
    #[deprecated]
    pub fn response_cache_byte_size(&mut self, size: u64) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetResponseCacheByteSize(self.0, size),
            self
        )
    }

    /// Set the directory containing cache shared libraries.
    /// This directory is searched when looking for cache implementations.
    ///
    /// `cache_dir` The full path of the cache directory.
    pub fn response_cache_directory<P: AsRef<Path>>(
        &mut self,
        cache_dir: P,
    ) -> Result<&mut Self, Error> {
        let cache_dir = path_to_cstring(cache_dir)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCacheDirectory(self.0, cache_dir.as_ptr()),
            self
        )
    }

    /// Set the cache config that will be used to initialize the cache
    /// implementation for `cache_name`.
    ///
    /// It is expected that the `cache_name` provided matches a directory inside
    /// the `cache_dir` used for [Options::response_cache_directory].
    /// The default `cache_dir` is "/opt/tritonserver/caches", so for a `cache_name` of
    /// "local", Triton would expect to find the "local" cache implementation at
    /// "/opt/tritonserver/caches/local/libtritoncache_local.so".
    ///
    /// Altogether, an example for the "local" cache implementation would look like:
    /// ```ignore
    /// let cache_name = "local";
    /// let config_json = "{\"size\": 1048576}";
    /// options.response_cache_config(cache_name, config_json)?;
    /// ```
    ///
    /// `cache_name` The name of the cache. Example names would be
    /// "local", "redis", or the name of a custom cache implementation. \
    /// `config_json` The string representation of config JSON that is
    /// used to initialize the cache implementation.
    pub fn response_cache_config<N: AsRef<str>, J: AsRef<str>>(
        &mut self,
        cache_name: N,
        config_json: J,
    ) -> Result<&mut Self, Error> {
        let name = to_cstring(cache_name)?;
        let config_json = to_cstring(config_json)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCacheConfig(
                self.0,
                name.as_ptr(),
                config_json.as_ptr()
            ),
            self
        )
    }

    /// Set the minimum supported CUDA compute capability. \
    /// `capability`: The minimum CUDA compute capability.
    pub fn min_supported_compute_capability(
        &mut self,
        capability: f64,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMinSupportedComputeCapability(self.0, capability),
            self
        )
    }

    /// Enable or disable exit-on-error. True to enable exiting on initialization error, false to continue.
    pub fn exit_on_error(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetExitOnError(self.0, enable),
            self
        )
    }

    /// Enable or disable strict readiness handling.
    pub fn strict_readiness(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetStrictReadiness(self.0, enable),
            self
        )
    }

    /// Set the exit timeout. The timeout is truncated to whole seconds, with a minimum of one second.
    pub fn exit_timeout(&mut self, timeout: Duration) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetExitTimeout(self.0, timeout.as_secs().max(1) as _),
            self
        )
    }

    /// Set the number of threads used in the buffer manager.
    pub fn buffer_manager_thread_count(&mut self, thread: usize) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetBufferManagerThreadCount(self.0, thread as _),
            self
        )
    }

    /// Set the number of threads to concurrently load models in a server options.
    ///
    /// `thread_count` The number of threads.
    pub fn model_load_thread_count(&mut self, thread_count: usize) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelLoadThreadCount(self.0, thread_count as _),
            self
        )
    }

    /// Set the number of retries to load a model in a server options.
    ///
    /// `retry_count` The number of retries.
    pub fn model_retry_count(&mut self, retry_count: usize) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelLoadRetryCount(self.0, retry_count as _),
            self
        )
    }

    /// Enable peer access to allow a GPU device to directly access the memory of another GPU device.
    /// Note that even when this option is set to true, Triton will only try to enable peer access
    /// and might fail to enable it if the underlying system doesn't support peer access.
    ///
    /// `enable_peer_access` Whether to enable peer access or not.
    pub fn peer_access(&mut self, enable_peer_access: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetEnablePeerAccess(self.0, enable_peer_access),
            self
        )
    }

    /// Enable model namespacing to allow serving models with the same name if they are in different namespaces.
    ///
    /// `enable_namespace` Whether to enable model namespacing or not.
    pub fn model_namespacing(&mut self, enable_namespace: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelNamespacing(self.0, enable_namespace),
            self
        )
    }

    /// Provide a log output file.
    ///
    /// `log_file` a string defining the file where the log outputs will be saved.
    /// An empty string for the file name will cause Triton to direct logging facilities to the console.
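    ///
    /// A sketch of a typical logging setup (the path and verbosity level are placeholders):
    /// ```ignore
    /// options
    ///     .log_file("/tmp/triton.log")?
    ///     .log_format(LogFormat::Iso8601)?
    ///     .log_verbose(1)?;
    /// ```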
    pub fn log_file<P: AsRef<str>>(&mut self, log_file: P) -> Result<&mut Self, Error> {
        let log_file = to_cstring(log_file)?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogFile(self.0, log_file.as_ptr()),
            self
        )
    }

    /// Enable or disable info level logging.
    pub fn log_info(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogInfo(self.0, enable),
            self
        )
    }

    /// Enable or disable warning level logging.
    pub fn log_warn(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogWarn(self.0, enable),
            self
        )
    }

    /// Enable or disable error level logging.
    pub fn log_error(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogError(self.0, enable),
            self
        )
    }

    /// Set the format of logging.
    pub fn log_format(&mut self, log_format: LogFormat) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogFormat(self.0, log_format as _),
            self
        )
    }

    /// Set verbose logging level. Level zero disables verbose logging.
    pub fn log_verbose(&mut self, level: i32) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetLogVerbose(self.0, level),
            self
        )
    }

    /// Enable or disable metrics collection in a server options.
    pub fn metrics(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMetrics(self.0, enable),
            self
        )
    }

    /// Enable or disable GPU metrics collection in a server options.
    /// GPU metrics are collected if both this option and [Options::metrics] are set.
    pub fn gpu_metrics(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetGpuMetrics(self.0, enable),
            self
        )
    }

    /// Enable or disable CPU metrics collection in a server options.
    /// CPU metrics are collected if both this option and [Options::metrics] are true.
    /// True to enable CPU metrics, false to disable.
    pub fn cpu_metrics(&mut self, enable: bool) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetCpuMetrics(self.0, enable),
            self
        )
    }

    /// Set the interval for metrics collection in a server options.
    /// This is 2000 milliseconds by default.
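    ///
    /// For example (the interval is chosen arbitrarily):
    /// ```ignore
    /// use std::time::Duration;
    ///
    /// options
    ///     .metrics(true)?
    ///     .gpu_metrics(true)?
    ///     .metrics_interval(Duration::from_secs(5))?;
    /// ```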
    pub fn metrics_interval(&mut self, interval: Duration) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMetricsInterval(
                self.0,
                interval.as_millis().max(1) as _,
            ),
            self
        )
    }

    /// Set the directory containing backend shared libraries. \
    /// This directory is searched last after the version and model directory
    /// in the model repository when looking for the backend shared library for a model. \
    /// If the backend is named 'be' the directory searched is 'backend_dir'/be/libtriton_be.so.
    pub fn backend_directory<P: AsRef<Path>>(&mut self, path: P) -> Result<&mut Self, Error> {
        let path = path
            .as_ref()
            .canonicalize()
            .map_err(|err| Error::new(ErrorCode::InvalidArg, err.to_string()))
            .and_then(|path| {
                CString::new(path.as_os_str().as_bytes())
                    .map_err(|err| Error::new(ErrorCode::InvalidArg, err.to_string()))
            })?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetBackendDirectory(self.0, path.as_ptr()),
            self
        )
    }

    /// Set the directory containing repository agent shared libraries. \
    /// This directory is searched when looking for the repository agent shared library for a model. \
    /// If the agent is named 'ra' the directory searched is 'repoagent_dir'/ra/libtritonrepoagent_ra.so.
    pub fn repo_agent_directory<P: AsRef<Path>>(&mut self, path: P) -> Result<&mut Self, Error> {
        let path = CString::new(path.as_ref().as_os_str().as_bytes())
            .map_err(|err| Error::new(ErrorCode::InvalidArg, err.to_string()))?;
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetRepoAgentDirectory(self.0, path.as_ptr()),
            self
        )
    }

    /// Specify the limit on memory usage as a fraction on the device
    /// identified by `kind` and `device`. If model loading on the device
    /// is requested and the current memory usage exceeds the limit, the load will be rejected.
    /// If not specified, the limit will not be set.
    ///
    /// Currently supports [InstanceGroupKind::Gpu].
    ///
    /// `kind` The kind of the device. \
    /// `device` The id of the device. \
    /// `fraction` The limit on memory usage as a fraction.
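    ///
    /// For example, a sketch that rejects loads on GPU 0 once memory usage exceeds
    /// 80% (the fraction is illustrative):
    /// ```ignore
    /// options.model_load_device_limit(InstanceGroupKind::Gpu, 0, 0.8)?;
    /// ```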
    pub fn model_load_device_limit(
        &mut self,
        kind: InstanceGroupKind,
        device: i32,
        fraction: f64,
    ) -> Result<&mut Self, Error> {
        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetModelLoadDeviceLimit(
                self.0, kind as _, device, fraction
            ),
            self
        )
    }

    /// Set a configuration setting for a named backend in a server options. \
    /// `name`: The name of the backend. \
    /// `setting`: The name of the setting. \
    /// `value`: The setting value.
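    ///
    /// A sketch mirroring the CLI's `--backend-config=tensorflow,version=2`
    /// (treat the backend and setting names as illustrative):
    /// ```ignore
    /// options.backend_config("tensorflow", "version", "2")?;
    /// ```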
    pub fn backend_config<N, S, V>(
        &mut self,
        name: N,
        setting: S,
        value: V,
    ) -> Result<&mut Self, Error>
    where
        N: AsRef<str>,
        S: AsRef<str>,
        V: AsRef<str>,
    {
        let name = to_cstring(name)?;
        let setting = to_cstring(setting)?;
        let value = to_cstring(value)?;

        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetBackendConfig(
                self.0,
                name.as_ptr(),
                setting.as_ptr(),
                value.as_ptr(),
            ),
            self
        )
    }

    /// Set a host policy setting for a given policy name in a server options. \
    /// `name`: The name of the policy. \
    /// `setting`: The name of the setting. \
    /// `value`: The setting value.
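    ///
    /// For instance, a sketch mirroring the CLI's `--host-policy=gpu_0,numa-node=0`
    /// (the policy name and setting are illustrative):
    /// ```ignore
    /// options.host_policy("gpu_0", "numa-node", "0")?;
    /// ```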
    pub fn host_policy<N, S, V>(
        &mut self,
        name: N,
        setting: S,
        value: V,
    ) -> Result<&mut Self, Error>
    where
        N: AsRef<str>,
        S: AsRef<str>,
        V: AsRef<str>,
    {
        let name = to_cstring(name)?;
        let setting = to_cstring(setting)?;
        let value = to_cstring(value)?;

        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetHostPolicy(
                self.0,
                name.as_ptr(),
                setting.as_ptr(),
                value.as_ptr(),
            ),
            self
        )
    }

    /// Set a configuration setting for metrics in server options.
    ///
    /// `name` The name of the configuration group. An empty string indicates a global configuration option. \
    /// `setting` The name of the setting. \
    /// `value` The setting value.
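    ///
    /// E.g., a sketch mirroring the CLI's `--metrics-config summary_latencies=true`,
    /// using the empty string as the global configuration group (the setting name is
    /// illustrative):
    /// ```ignore
    /// options.metrics_config("", "summary_latencies", "true")?;
    /// ```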
    pub fn metrics_config<N, S, V>(
        &mut self,
        name: N,
        setting: S,
        value: V,
    ) -> Result<&mut Self, Error>
    where
        N: AsRef<str>,
        S: AsRef<str>,
        V: AsRef<str>,
    {
        let name = to_cstring(name)?;
        let setting = to_cstring(setting)?;
        let value = to_cstring(value)?;

        triton_call!(
            sys::TRITONSERVER_ServerOptionsSetMetricsConfig(
                self.0,
                name.as_ptr(),
                setting.as_ptr(),
                value.as_ptr()
            ),
            self
        )
    }
}

unsafe impl Send for Options {}

impl Drop for Options {
    fn drop(&mut self) {
        if !self.0.is_null() {
            unsafe {
                let _ = sys::TRITONSERVER_ServerOptionsDelete(self.0);
            }
        }
    }
}