1use super::TransformOptionsPayload;
2#[cfg(feature = "azure")]
3use super::azure;
4#[cfg(feature = "gcs")]
5use super::gcs;
6use super::metrics::DEFAULT_MAX_CONCURRENT_TRANSFORMS;
7#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
8use super::remote::STORAGE_DOWNLOAD_TIMEOUT_SECS;
9#[cfg(feature = "s3")]
10use super::s3;
11use super::stderr_write;
12
13use std::collections::HashMap;
14use std::env;
15use std::fmt;
16use std::io;
17use std::path::PathBuf;
18use std::sync::Arc;
19use std::sync::atomic::AtomicU64;
20use url::Url;
21
/// Backend label used for metrics / diagnostics.
///
/// Mirrors [`StorageBackend`] but is compiled unconditionally, even in a
/// filesystem-only build (the `dead_code` allow covers the cloud variants
/// that are never constructed when their features are disabled).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub(super) enum StorageBackendLabel {
    Filesystem,
    S3,
    Gcs,
    Azure,
}
32
/// Which storage backend the server reads objects from.
///
/// Only compiled when at least one cloud feature is enabled; in a
/// filesystem-only build the backend is implicitly the local filesystem
/// and this enum does not exist. Each cloud variant is additionally
/// gated on its own feature.
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StorageBackend {
    /// Serve objects from the local filesystem (the default).
    Filesystem,
    /// Amazon S3 (or S3-compatible) object storage.
    #[cfg(feature = "s3")]
    S3,
    /// Google Cloud Storage.
    #[cfg(feature = "gcs")]
    Gcs,
    /// Azure Blob Storage.
    #[cfg(feature = "azure")]
    Azure,
}
50
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
impl StorageBackend {
    /// Parses a backend name (case-insensitive) into a `StorageBackend`.
    ///
    /// Accepts `filesystem`/`fs`/`local` plus one name per compiled-in
    /// cloud backend. On failure the error lists the accepted values
    /// and, when the name matches a known-but-disabled backend, hints
    /// at the cargo feature that would enable it.
    pub fn parse(value: &str) -> Result<Self, String> {
        if value.eq_ignore_ascii_case("filesystem")
            || value.eq_ignore_ascii_case("fs")
            || value.eq_ignore_ascii_case("local")
        {
            return Ok(Self::Filesystem);
        }
        #[cfg(feature = "s3")]
        if value.eq_ignore_ascii_case("s3") {
            return Ok(Self::S3);
        }
        #[cfg(feature = "gcs")]
        if value.eq_ignore_ascii_case("gcs") {
            return Ok(Self::Gcs);
        }
        #[cfg(feature = "azure")]
        if value.eq_ignore_ascii_case("azure") {
            return Ok(Self::Azure);
        }

        // Not a recognized name: build the list of accepted values for
        // this particular build configuration.
        let mut expected = vec!["filesystem"];
        #[cfg(feature = "s3")]
        expected.push("s3");
        #[cfg(feature = "gcs")]
        expected.push("gcs");
        #[cfg(feature = "azure")]
        expected.push("azure");

        // `mut` is only exercised when at least one cloud feature is
        // compiled out, hence the allow.
        #[allow(unused_mut)]
        let mut hint = String::new();
        #[cfg(not(feature = "s3"))]
        if value.eq_ignore_ascii_case("s3") {
            hint = " (hint: rebuild with --features s3)".to_string();
        }
        #[cfg(not(feature = "gcs"))]
        if value.eq_ignore_ascii_case("gcs") {
            hint = " (hint: rebuild with --features gcs)".to_string();
        }
        #[cfg(not(feature = "azure"))]
        if value.eq_ignore_ascii_case("azure") {
            hint = " (hint: rebuild with --features azure)".to_string();
        }

        Err(format!(
            "unknown storage backend `{value}` (expected {}){hint}",
            expected.join(" or ")
        ))
    }
}
95
/// Default address the HTTP server binds to when none is configured.
pub const DEFAULT_BIND_ADDR: &str = "127.0.0.1:8080";

/// Default filesystem storage root (the current working directory).
pub const DEFAULT_STORAGE_ROOT: &str = ".";

/// Default `max-age` (seconds) for publicly cacheable responses.
pub(super) const DEFAULT_PUBLIC_MAX_AGE_SECONDS: u32 = 3600;
/// Default `stale-while-revalidate` window (seconds) for public responses.
pub(super) const DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS: u32 = 60;

/// Default wall-clock deadline for a single transform, in seconds.
pub(super) const DEFAULT_TRANSFORM_DEADLINE_SECS: u64 = 30;

/// Pluggable log sink: when installed on a config, each log line is
/// passed here instead of being written to stderr (see `ServerConfig::log`).
pub type LogHandler = Arc<dyn Fn(&str) + Send + Sync>;
120
/// Runtime configuration for the server.
///
/// Built either programmatically (`ServerConfig::new` plus `with_*`
/// builders) or from `TRUSS_*` environment variables via `from_env`.
/// Cheap to clone: shared state lives behind `Arc`s.
pub struct ServerConfig {
    /// Root directory served when the filesystem backend is active.
    pub storage_root: PathBuf,
    /// Bearer token required on requests; `None` disables token auth.
    pub bearer_token: Option<String>,
    /// Canonical external origin; recommended behind proxies/CDNs so
    /// signed-URL verification does not depend on the Host header
    /// (see the warning emitted in `from_env`).
    pub public_base_url: Option<String>,
    /// Key ID used when generating signed URLs.
    pub signed_url_key_id: Option<String>,
    /// Secret paired with `signed_url_key_id`.
    pub signed_url_secret: Option<String>,
    /// All accepted signing keys (key ID -> secret) for verification.
    pub signing_keys: HashMap<String, String>,
    // assumes this permits plain-HTTP (non-TLS) source URLs/endpoints —
    // it is also forwarded to the cloud context builders; TODO confirm.
    pub allow_insecure_url_sources: bool,
    /// Optional directory for cached results; `None` disables the cache.
    pub cache_root: Option<PathBuf>,
    /// `Cache-Control: max-age` for public responses, in seconds.
    pub public_max_age_seconds: u32,
    /// `stale-while-revalidate` window for public responses, in seconds.
    pub public_stale_while_revalidate_seconds: u32,
    /// When true, skip Accept-header-driven format negotiation.
    pub disable_accept_negotiation: bool,
    /// Optional log sink; when `None`, log lines go to stderr.
    pub log_handler: Option<LogHandler>,
    /// Upper bound on concurrently running transforms
    /// (1..=1024 when set from the environment).
    pub max_concurrent_transforms: u64,
    /// Per-transform wall-clock deadline, in seconds (1..=300 from env).
    pub transform_deadline_secs: u64,
    /// Live count of in-flight transforms, shared across clones.
    pub transforms_in_flight: Arc<AtomicU64>,
    /// Named transform presets (preset name -> options payload).
    pub presets: HashMap<String, TransformOptionsPayload>,
    /// Timeout for remote storage downloads, in seconds (1..=300 from env).
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    pub storage_timeout_secs: u64,
    /// Which storage backend is active.
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    pub storage_backend: StorageBackend,
    /// S3 client context; populated when the S3 backend is configured.
    #[cfg(feature = "s3")]
    pub s3_context: Option<Arc<s3::S3Context>>,
    /// GCS client context; populated when the GCS backend is configured.
    #[cfg(feature = "gcs")]
    pub gcs_context: Option<Arc<gcs::GcsContext>>,
    /// Azure client context; populated when the Azure backend is configured.
    #[cfg(feature = "azure")]
    pub azure_context: Option<Arc<azure::AzureContext>>,
}
227
// NOTE(review): every field here is `Clone`, so `#[derive(Clone)]` on the
// struct would behave identically; the manual impl is kept, presumably so
// the `Arc`-sharing of `transforms_in_flight` and the storage contexts
// stays explicit — confirm before replacing with a derive.
impl Clone for ServerConfig {
    /// Clones the config. `Arc`-held fields (`transforms_in_flight`,
    /// `log_handler`, cloud contexts) are shared by refcount bump, so
    /// clones observe the same in-flight counter.
    fn clone(&self) -> Self {
        Self {
            storage_root: self.storage_root.clone(),
            bearer_token: self.bearer_token.clone(),
            public_base_url: self.public_base_url.clone(),
            signed_url_key_id: self.signed_url_key_id.clone(),
            signed_url_secret: self.signed_url_secret.clone(),
            signing_keys: self.signing_keys.clone(),
            allow_insecure_url_sources: self.allow_insecure_url_sources,
            cache_root: self.cache_root.clone(),
            public_max_age_seconds: self.public_max_age_seconds,
            public_stale_while_revalidate_seconds: self.public_stale_while_revalidate_seconds,
            disable_accept_negotiation: self.disable_accept_negotiation,
            log_handler: self.log_handler.clone(),
            max_concurrent_transforms: self.max_concurrent_transforms,
            transform_deadline_secs: self.transform_deadline_secs,
            // Explicit `Arc::clone`: clones share one counter.
            transforms_in_flight: Arc::clone(&self.transforms_in_flight),
            presets: self.presets.clone(),
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_timeout_secs: self.storage_timeout_secs,
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_backend: self.storage_backend,
            #[cfg(feature = "s3")]
            s3_context: self.s3_context.clone(),
            #[cfg(feature = "gcs")]
            gcs_context: self.gcs_context.clone(),
            #[cfg(feature = "azure")]
            azure_context: self.azure_context.clone(),
        }
    }
}
260
/// Hand-written `Debug` so secrets never reach logs: the bearer token and
/// signing secrets print as `[REDACTED]`, signing keys and presets print
/// their names/IDs only, and opaque handles (`log_handler`, cloud
/// contexts) print as `".."`.
impl fmt::Debug for ServerConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("ServerConfig");
        d.field("storage_root", &self.storage_root)
            .field(
                "bearer_token",
                &self.bearer_token.as_ref().map(|_| "[REDACTED]"),
            )
            .field("public_base_url", &self.public_base_url)
            .field("signed_url_key_id", &self.signed_url_key_id)
            .field(
                "signed_url_secret",
                &self.signed_url_secret.as_ref().map(|_| "[REDACTED]"),
            )
            .field(
                "signing_keys",
                // Key IDs are not sensitive; the secret values are.
                &self.signing_keys.keys().collect::<Vec<_>>(),
            )
            .field(
                "allow_insecure_url_sources",
                &self.allow_insecure_url_sources,
            )
            .field("cache_root", &self.cache_root)
            .field("public_max_age_seconds", &self.public_max_age_seconds)
            .field(
                "public_stale_while_revalidate_seconds",
                &self.public_stale_while_revalidate_seconds,
            )
            .field(
                "disable_accept_negotiation",
                &self.disable_accept_negotiation,
            )
            .field("log_handler", &self.log_handler.as_ref().map(|_| ".."))
            .field("max_concurrent_transforms", &self.max_concurrent_transforms)
            .field("transform_deadline_secs", &self.transform_deadline_secs)
            .field("presets", &self.presets.keys().collect::<Vec<_>>());
        // NOTE(review): `storage_timeout_secs` is not printed — confirm
        // whether that omission is intentional.
        #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
        {
            d.field("storage_backend", &self.storage_backend);
        }
        #[cfg(feature = "s3")]
        {
            d.field("s3_context", &self.s3_context.as_ref().map(|_| ".."));
        }
        #[cfg(feature = "gcs")]
        {
            d.field("gcs_context", &self.gcs_context.as_ref().map(|_| ".."));
        }
        #[cfg(feature = "azure")]
        {
            d.field("azure_context", &self.azure_context.as_ref().map(|_| ".."));
        }
        d.finish()
    }
}
316
/// Equality over configuration values only.
///
/// Runtime-only state is deliberately excluded: `log_handler` (closures
/// cannot be compared) and `transforms_in_flight` (a live counter).
/// The feature-gated storage settings are compared via `cfg_storage_eq`
/// so this method stays free of `#[cfg]` noise.
impl PartialEq for ServerConfig {
    fn eq(&self, other: &Self) -> bool {
        self.storage_root == other.storage_root
            && self.bearer_token == other.bearer_token
            && self.public_base_url == other.public_base_url
            && self.signed_url_key_id == other.signed_url_key_id
            && self.signed_url_secret == other.signed_url_secret
            && self.signing_keys == other.signing_keys
            && self.allow_insecure_url_sources == other.allow_insecure_url_sources
            && self.cache_root == other.cache_root
            && self.public_max_age_seconds == other.public_max_age_seconds
            && self.public_stale_while_revalidate_seconds
                == other.public_stale_while_revalidate_seconds
            && self.disable_accept_negotiation == other.disable_accept_negotiation
            && self.max_concurrent_transforms == other.max_concurrent_transforms
            && self.transform_deadline_secs == other.transform_deadline_secs
            && self.presets == other.presets
            && cfg_storage_eq(self, other)
    }
}
337
338fn cfg_storage_eq(_this: &ServerConfig, _other: &ServerConfig) -> bool {
339 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
340 {
341 if _this.storage_backend != _other.storage_backend {
342 return false;
343 }
344 }
345 #[cfg(feature = "s3")]
346 {
347 if _this
348 .s3_context
349 .as_ref()
350 .map(|c| (&c.default_bucket, &c.endpoint_url))
351 != _other
352 .s3_context
353 .as_ref()
354 .map(|c| (&c.default_bucket, &c.endpoint_url))
355 {
356 return false;
357 }
358 }
359 #[cfg(feature = "gcs")]
360 {
361 if _this
362 .gcs_context
363 .as_ref()
364 .map(|c| (&c.default_bucket, &c.endpoint_url))
365 != _other
366 .gcs_context
367 .as_ref()
368 .map(|c| (&c.default_bucket, &c.endpoint_url))
369 {
370 return false;
371 }
372 }
373 #[cfg(feature = "azure")]
374 {
375 if _this
376 .azure_context
377 .as_ref()
378 .map(|c| (&c.default_container, &c.endpoint_url))
379 != _other
380 .azure_context
381 .as_ref()
382 .map(|c| (&c.default_container, &c.endpoint_url))
383 {
384 return false;
385 }
386 }
387 true
388}
389
// Total equality is sound: `eq` compares only fields whose equality is
// itself total (no floats or other partial orders involved).
impl Eq for ServerConfig {}
391
392impl ServerConfig {
393 pub fn new(storage_root: PathBuf, bearer_token: Option<String>) -> Self {
408 Self {
409 storage_root,
410 bearer_token,
411 public_base_url: None,
412 signed_url_key_id: None,
413 signed_url_secret: None,
414 signing_keys: HashMap::new(),
415 allow_insecure_url_sources: false,
416 cache_root: None,
417 public_max_age_seconds: DEFAULT_PUBLIC_MAX_AGE_SECONDS,
418 public_stale_while_revalidate_seconds: DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS,
419 disable_accept_negotiation: false,
420 log_handler: None,
421 max_concurrent_transforms: DEFAULT_MAX_CONCURRENT_TRANSFORMS,
422 transform_deadline_secs: DEFAULT_TRANSFORM_DEADLINE_SECS,
423 transforms_in_flight: Arc::new(AtomicU64::new(0)),
424 presets: HashMap::new(),
425 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
426 storage_timeout_secs: STORAGE_DOWNLOAD_TIMEOUT_SECS,
427 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
428 storage_backend: StorageBackend::Filesystem,
429 #[cfg(feature = "s3")]
430 s3_context: None,
431 #[cfg(feature = "gcs")]
432 gcs_context: None,
433 #[cfg(feature = "azure")]
434 azure_context: None,
435 }
436 }
437
438 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
439 pub(super) fn storage_backend_label(&self) -> StorageBackendLabel {
440 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
441 {
442 match self.storage_backend {
443 StorageBackend::Filesystem => StorageBackendLabel::Filesystem,
444 #[cfg(feature = "s3")]
445 StorageBackend::S3 => StorageBackendLabel::S3,
446 #[cfg(feature = "gcs")]
447 StorageBackend::Gcs => StorageBackendLabel::Gcs,
448 #[cfg(feature = "azure")]
449 StorageBackend::Azure => StorageBackendLabel::Azure,
450 }
451 }
452 #[cfg(not(any(feature = "s3", feature = "gcs", feature = "azure")))]
453 {
454 StorageBackendLabel::Filesystem
455 }
456 }
457
458 pub(super) fn log(&self, msg: &str) {
461 if let Some(handler) = &self.log_handler {
462 handler(msg);
463 } else {
464 stderr_write(msg);
465 }
466 }
467
468 pub fn with_signed_url_credentials(
486 mut self,
487 key_id: impl Into<String>,
488 secret: impl Into<String>,
489 ) -> Self {
490 let key_id = key_id.into();
491 let secret = secret.into();
492 self.signing_keys.insert(key_id.clone(), secret.clone());
493 self.signed_url_key_id = Some(key_id);
494 self.signed_url_secret = Some(secret);
495 self
496 }
497
498 pub fn with_signing_keys(mut self, keys: HashMap<String, String>) -> Self {
504 self.signing_keys.extend(keys);
505 self
506 }
507
508 pub fn with_insecure_url_sources(mut self, allow_insecure_url_sources: bool) -> Self {
525 self.allow_insecure_url_sources = allow_insecure_url_sources;
526 self
527 }
528
529 pub fn with_cache_root(mut self, cache_root: impl Into<PathBuf>) -> Self {
545 self.cache_root = Some(cache_root.into());
546 self
547 }
548
549 #[cfg(feature = "s3")]
551 pub fn with_s3_context(mut self, context: s3::S3Context) -> Self {
552 self.storage_backend = StorageBackend::S3;
553 self.s3_context = Some(Arc::new(context));
554 self
555 }
556
557 #[cfg(feature = "gcs")]
559 pub fn with_gcs_context(mut self, context: gcs::GcsContext) -> Self {
560 self.storage_backend = StorageBackend::Gcs;
561 self.gcs_context = Some(Arc::new(context));
562 self
563 }
564
565 #[cfg(feature = "azure")]
567 pub fn with_azure_context(mut self, context: azure::AzureContext) -> Self {
568 self.storage_backend = StorageBackend::Azure;
569 self.azure_context = Some(Arc::new(context));
570 self
571 }
572
573 pub fn with_presets(mut self, presets: HashMap<String, TransformOptionsPayload>) -> Self {
575 self.presets = presets;
576 self
577 }
578
579 pub fn from_env() -> io::Result<Self> {
654 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
655 let storage_backend = match env::var("TRUSS_STORAGE_BACKEND")
656 .ok()
657 .filter(|v| !v.is_empty())
658 {
659 Some(value) => StorageBackend::parse(&value)
660 .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?,
661 None => StorageBackend::Filesystem,
662 };
663
664 let storage_root =
665 env::var("TRUSS_STORAGE_ROOT").unwrap_or_else(|_| DEFAULT_STORAGE_ROOT.to_string());
666 let storage_root = PathBuf::from(storage_root).canonicalize()?;
667 let bearer_token = env::var("TRUSS_BEARER_TOKEN")
668 .ok()
669 .filter(|value| !value.is_empty());
670 let public_base_url = env::var("TRUSS_PUBLIC_BASE_URL")
671 .ok()
672 .filter(|value| !value.is_empty())
673 .map(validate_public_base_url)
674 .transpose()?;
675 let signed_url_key_id = env::var("TRUSS_SIGNED_URL_KEY_ID")
676 .ok()
677 .filter(|value| !value.is_empty());
678 let signed_url_secret = env::var("TRUSS_SIGNED_URL_SECRET")
679 .ok()
680 .filter(|value| !value.is_empty());
681
682 if signed_url_key_id.is_some() != signed_url_secret.is_some() {
683 return Err(io::Error::new(
684 io::ErrorKind::InvalidInput,
685 "TRUSS_SIGNED_URL_KEY_ID and TRUSS_SIGNED_URL_SECRET must be set together",
686 ));
687 }
688
689 let mut signing_keys = HashMap::new();
690 if let (Some(kid), Some(sec)) = (&signed_url_key_id, &signed_url_secret) {
691 signing_keys.insert(kid.clone(), sec.clone());
692 }
693 if let Ok(json) = env::var("TRUSS_SIGNING_KEYS")
694 && !json.is_empty()
695 {
696 let extra: HashMap<String, String> = serde_json::from_str(&json).map_err(|e| {
697 io::Error::new(
698 io::ErrorKind::InvalidInput,
699 format!("TRUSS_SIGNING_KEYS must be valid JSON: {e}"),
700 )
701 })?;
702 for (kid, sec) in &extra {
703 if kid.is_empty() || sec.is_empty() {
704 return Err(io::Error::new(
705 io::ErrorKind::InvalidInput,
706 "TRUSS_SIGNING_KEYS must not contain empty key IDs or secrets",
707 ));
708 }
709 }
710 signing_keys.extend(extra);
711 }
712
713 if !signing_keys.is_empty() && public_base_url.is_none() {
714 eprintln!(
715 "truss: warning: signing keys are configured but TRUSS_PUBLIC_BASE_URL is not. \
716 Behind a reverse proxy or CDN the Host header may differ from the externally \
717 visible authority, causing signed URL verification to fail. Consider setting \
718 TRUSS_PUBLIC_BASE_URL to the canonical external origin."
719 );
720 }
721
722 let cache_root = env::var("TRUSS_CACHE_ROOT")
723 .ok()
724 .filter(|value| !value.is_empty())
725 .map(PathBuf::from);
726
727 let public_max_age_seconds = parse_optional_env_u32("TRUSS_PUBLIC_MAX_AGE")?
728 .unwrap_or(DEFAULT_PUBLIC_MAX_AGE_SECONDS);
729 let public_stale_while_revalidate_seconds =
730 parse_optional_env_u32("TRUSS_PUBLIC_STALE_WHILE_REVALIDATE")?
731 .unwrap_or(DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS);
732
733 let allow_insecure_url_sources = env_flag("TRUSS_ALLOW_INSECURE_URL_SOURCES");
734
735 let max_concurrent_transforms = match env::var("TRUSS_MAX_CONCURRENT_TRANSFORMS")
736 .ok()
737 .filter(|v| !v.is_empty())
738 {
739 Some(value) => {
740 let n: u64 = value.parse().map_err(|_| {
741 io::Error::new(
742 io::ErrorKind::InvalidInput,
743 "TRUSS_MAX_CONCURRENT_TRANSFORMS must be a positive integer",
744 )
745 })?;
746 if n == 0 || n > 1024 {
747 return Err(io::Error::new(
748 io::ErrorKind::InvalidInput,
749 "TRUSS_MAX_CONCURRENT_TRANSFORMS must be between 1 and 1024",
750 ));
751 }
752 n
753 }
754 None => DEFAULT_MAX_CONCURRENT_TRANSFORMS,
755 };
756
757 let transform_deadline_secs = match env::var("TRUSS_TRANSFORM_DEADLINE_SECS")
758 .ok()
759 .filter(|v| !v.is_empty())
760 {
761 Some(value) => {
762 let secs: u64 = value.parse().map_err(|_| {
763 io::Error::new(
764 io::ErrorKind::InvalidInput,
765 "TRUSS_TRANSFORM_DEADLINE_SECS must be a positive integer",
766 )
767 })?;
768 if secs == 0 || secs > 300 {
769 return Err(io::Error::new(
770 io::ErrorKind::InvalidInput,
771 "TRUSS_TRANSFORM_DEADLINE_SECS must be between 1 and 300",
772 ));
773 }
774 secs
775 }
776 None => DEFAULT_TRANSFORM_DEADLINE_SECS,
777 };
778
779 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
780 let storage_timeout_secs = match env::var("TRUSS_STORAGE_TIMEOUT_SECS")
781 .ok()
782 .filter(|v| !v.is_empty())
783 {
784 Some(value) => {
785 let secs: u64 = value.parse().map_err(|_| {
786 io::Error::new(
787 io::ErrorKind::InvalidInput,
788 "TRUSS_STORAGE_TIMEOUT_SECS must be a positive integer",
789 )
790 })?;
791 if secs == 0 || secs > 300 {
792 return Err(io::Error::new(
793 io::ErrorKind::InvalidInput,
794 "TRUSS_STORAGE_TIMEOUT_SECS must be between 1 and 300",
795 ));
796 }
797 secs
798 }
799 None => STORAGE_DOWNLOAD_TIMEOUT_SECS,
800 };
801
802 #[cfg(feature = "s3")]
803 let s3_context = if storage_backend == StorageBackend::S3 {
804 let bucket = env::var("TRUSS_S3_BUCKET")
805 .ok()
806 .filter(|v| !v.is_empty())
807 .ok_or_else(|| {
808 io::Error::new(
809 io::ErrorKind::InvalidInput,
810 "TRUSS_S3_BUCKET is required when TRUSS_STORAGE_BACKEND=s3",
811 )
812 })?;
813 Some(Arc::new(s3::build_s3_context(
814 bucket,
815 allow_insecure_url_sources,
816 )?))
817 } else {
818 None
819 };
820
821 #[cfg(feature = "gcs")]
822 let gcs_context = if storage_backend == StorageBackend::Gcs {
823 let bucket = env::var("TRUSS_GCS_BUCKET")
824 .ok()
825 .filter(|v| !v.is_empty())
826 .ok_or_else(|| {
827 io::Error::new(
828 io::ErrorKind::InvalidInput,
829 "TRUSS_GCS_BUCKET is required when TRUSS_STORAGE_BACKEND=gcs",
830 )
831 })?;
832 Some(Arc::new(gcs::build_gcs_context(
833 bucket,
834 allow_insecure_url_sources,
835 )?))
836 } else {
837 if env::var("TRUSS_GCS_BUCKET")
838 .ok()
839 .filter(|v| !v.is_empty())
840 .is_some()
841 {
842 eprintln!(
843 "truss: warning: TRUSS_GCS_BUCKET is set but TRUSS_STORAGE_BACKEND is not \
844 `gcs`. The GCS bucket will be ignored. Set TRUSS_STORAGE_BACKEND=gcs to \
845 enable the GCS backend."
846 );
847 }
848 None
849 };
850
851 #[cfg(feature = "azure")]
852 let azure_context = if storage_backend == StorageBackend::Azure {
853 let container = env::var("TRUSS_AZURE_CONTAINER")
854 .ok()
855 .filter(|v| !v.is_empty())
856 .ok_or_else(|| {
857 io::Error::new(
858 io::ErrorKind::InvalidInput,
859 "TRUSS_AZURE_CONTAINER is required when TRUSS_STORAGE_BACKEND=azure",
860 )
861 })?;
862 Some(Arc::new(azure::build_azure_context(
863 container,
864 allow_insecure_url_sources,
865 )?))
866 } else {
867 if env::var("TRUSS_AZURE_CONTAINER")
868 .ok()
869 .filter(|v| !v.is_empty())
870 .is_some()
871 {
872 eprintln!(
873 "truss: warning: TRUSS_AZURE_CONTAINER is set but TRUSS_STORAGE_BACKEND is not \
874 `azure`. The Azure container will be ignored. Set TRUSS_STORAGE_BACKEND=azure to \
875 enable the Azure backend."
876 );
877 }
878 None
879 };
880
881 let presets = parse_presets_from_env()?;
882
883 Ok(Self {
884 storage_root,
885 bearer_token,
886 public_base_url,
887 signed_url_key_id,
888 signed_url_secret,
889 signing_keys,
890 allow_insecure_url_sources,
891 cache_root,
892 public_max_age_seconds,
893 public_stale_while_revalidate_seconds,
894 disable_accept_negotiation: env_flag("TRUSS_DISABLE_ACCEPT_NEGOTIATION"),
895 log_handler: None,
896 max_concurrent_transforms,
897 transform_deadline_secs,
898 transforms_in_flight: Arc::new(AtomicU64::new(0)),
899 presets,
900 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
901 storage_timeout_secs,
902 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
903 storage_backend,
904 #[cfg(feature = "s3")]
905 s3_context,
906 #[cfg(feature = "gcs")]
907 gcs_context,
908 #[cfg(feature = "azure")]
909 azure_context,
910 })
911 }
912}
913
914pub(super) fn env_flag(name: &str) -> bool {
915 env::var(name)
916 .map(|value| {
917 matches!(
918 value.as_str(),
919 "1" | "true" | "TRUE" | "yes" | "YES" | "on" | "ON"
920 )
921 })
922 .unwrap_or(false)
923}
924
925pub(super) fn parse_optional_env_u32(name: &str) -> io::Result<Option<u32>> {
926 match env::var(name) {
927 Ok(value) if !value.is_empty() => value.parse::<u32>().map(Some).map_err(|_| {
928 io::Error::new(
929 io::ErrorKind::InvalidInput,
930 format!("{name} must be a non-negative integer"),
931 )
932 }),
933 _ => Ok(None),
934 }
935}
936
937pub(super) fn parse_presets_from_env() -> io::Result<HashMap<String, TransformOptionsPayload>> {
938 let (json_str, source) = match env::var("TRUSS_PRESETS_FILE")
939 .ok()
940 .filter(|v| !v.is_empty())
941 {
942 Some(path) => {
943 let content = std::fs::read_to_string(&path).map_err(|e| {
944 io::Error::new(
945 io::ErrorKind::InvalidInput,
946 format!("failed to read TRUSS_PRESETS_FILE `{path}`: {e}"),
947 )
948 })?;
949 (content, format!("TRUSS_PRESETS_FILE `{path}`"))
950 }
951 None => match env::var("TRUSS_PRESETS").ok().filter(|v| !v.is_empty()) {
952 Some(value) => (value, "TRUSS_PRESETS".to_string()),
953 None => return Ok(HashMap::new()),
954 },
955 };
956
957 serde_json::from_str::<HashMap<String, TransformOptionsPayload>>(&json_str).map_err(|e| {
958 io::Error::new(
959 io::ErrorKind::InvalidInput,
960 format!("{source} must be valid JSON: {e}"),
961 )
962 })
963}
964
965pub(super) fn validate_public_base_url(value: String) -> io::Result<String> {
966 let parsed = Url::parse(&value).map_err(|error| {
967 io::Error::new(
968 io::ErrorKind::InvalidInput,
969 format!("TRUSS_PUBLIC_BASE_URL must be a valid URL: {error}"),
970 )
971 })?;
972
973 match parsed.scheme() {
974 "http" | "https" => Ok(parsed.to_string()),
975 _ => Err(io::Error::new(
976 io::ErrorKind::InvalidInput,
977 "TRUSS_PUBLIC_BASE_URL must use http or https",
978 )),
979 }
980}