1use super::TransformOptionsPayload;
2#[cfg(feature = "azure")]
3use super::azure;
4#[cfg(feature = "gcs")]
5use super::gcs;
/// Default upper bound on simultaneously running transforms; used by
/// `ServerConfig::new` and as the `TRUSS_MAX_CONCURRENT_TRANSFORMS` fallback.
pub(super) const DEFAULT_MAX_CONCURRENT_TRANSFORMS: u64 = 64;
8#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
9use super::remote::STORAGE_DOWNLOAD_TIMEOUT_SECS;
10#[cfg(feature = "s3")]
11use super::s3;
12use super::stderr_write;
13
14use std::collections::HashMap;
15use std::env;
16use std::fmt;
17use std::io;
18use std::path::PathBuf;
19use std::sync::Arc;
20use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU64, Ordering};
21use url::Url;
22
/// Logging verbosity threshold, ordered least to most verbose
/// (`Error` < `Warn` < `Info` < `Debug`); the derived `Ord` relies on the
/// explicit discriminants, and `repr(u8)` lets the level live in an
/// `AtomicU8` (see `ServerConfig::log_level`).
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LogLevel {
    /// Only errors.
    Error = 0,
    /// Errors and warnings.
    Warn = 1,
    /// Errors, warnings, and informational messages (the default).
    Info = 2,
    /// Everything, including debug output.
    Debug = 3,
}
44
45impl LogLevel {
46 pub(super) fn cycle(self) -> Self {
49 match self {
50 Self::Info => Self::Debug,
51 Self::Debug => Self::Error,
52 Self::Error => Self::Warn,
53 Self::Warn => Self::Info,
54 }
55 }
56
57 pub(super) fn from_u8(v: u8) -> Self {
59 match v {
60 0 => Self::Error,
61 1 => Self::Warn,
62 2 => Self::Info,
63 3 => Self::Debug,
64 _ => Self::Info,
65 }
66 }
67
68 pub(super) fn as_str(self) -> &'static str {
70 match self {
71 Self::Error => "error",
72 Self::Warn => "warn",
73 Self::Info => "info",
74 Self::Debug => "debug",
75 }
76 }
77}
78
79impl fmt::Display for LogLevel {
80 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
81 f.write_str(self.as_str())
82 }
83}
84
85impl std::str::FromStr for LogLevel {
86 type Err = String;
87
88 fn from_str(s: &str) -> Result<Self, Self::Err> {
89 match s.to_ascii_lowercase().as_str() {
90 "error" => Ok(Self::Error),
91 "warn" => Ok(Self::Warn),
92 "info" => Ok(Self::Info),
93 "debug" => Ok(Self::Debug),
94 _ => Err(format!(
95 "invalid log level `{s}`: expected error, warn, info, or debug"
96 )),
97 }
98 }
99}
100
/// Feature-independent name for the active storage backend.
///
/// Unlike `StorageBackend` (which only exists when at least one remote
/// backend feature is compiled in), this type is always present, so callers
/// can obtain a backend label without cfg gymnastics (see
/// `ServerConfig::storage_backend_label`).
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)] // remote variants are unused in builds without the matching feature
pub(super) enum StorageBackendLabel {
    Filesystem,
    S3,
    Gcs,
    Azure,
}
111
/// Storage backend selected at startup (via `TRUSS_STORAGE_BACKEND` or the
/// `with_*_context` builders).
///
/// Only compiled when at least one remote-storage feature is enabled; each
/// remote variant is additionally gated on its own feature.
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StorageBackend {
    /// Serve sources from the local filesystem (the default).
    Filesystem,
    /// Amazon S3.
    #[cfg(feature = "s3")]
    S3,
    /// Google Cloud Storage.
    #[cfg(feature = "gcs")]
    Gcs,
    /// Azure Blob Storage.
    #[cfg(feature = "azure")]
    Azure,
}
129
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
impl StorageBackend {
    /// Parses a backend name, case-insensitively.
    ///
    /// Accepts `filesystem`/`fs`/`local` plus the name of every backend
    /// compiled into this binary. A name that matches a known but
    /// not-compiled backend produces an error with a rebuild hint naming
    /// the cargo feature to enable.
    pub fn parse(value: &str) -> Result<Self, String> {
        match value.to_ascii_lowercase().as_str() {
            "filesystem" | "fs" | "local" => Ok(Self::Filesystem),
            #[cfg(feature = "s3")]
            "s3" => Ok(Self::S3),
            #[cfg(feature = "gcs")]
            "gcs" => Ok(Self::Gcs),
            #[cfg(feature = "azure")]
            "azure" => Ok(Self::Azure),
            _ => {
                // Build the "expected ..." list from the backends that are
                // actually available in this build.
                let mut expected = vec!["filesystem"];
                #[cfg(feature = "s3")]
                expected.push("s3");
                #[cfg(feature = "gcs")]
                expected.push("gcs");
                #[cfg(feature = "azure")]
                expected.push("azure");

                // `mut` is only exercised when at least one feature is
                // absent, hence the allow.
                #[allow(unused_mut)]
                let mut hint = String::new();
                #[cfg(not(feature = "s3"))]
                if value.eq_ignore_ascii_case("s3") {
                    hint = " (hint: rebuild with --features s3)".to_string();
                }
                #[cfg(not(feature = "gcs"))]
                if value.eq_ignore_ascii_case("gcs") {
                    hint = " (hint: rebuild with --features gcs)".to_string();
                }
                #[cfg(not(feature = "azure"))]
                if value.eq_ignore_ascii_case("azure") {
                    hint = " (hint: rebuild with --features azure)".to_string();
                }

                Err(format!(
                    "unknown storage backend `{value}` (expected {}){hint}",
                    expected.join(" or ")
                ))
            }
        }
    }
}
174
/// Default listen address for the HTTP server.
pub const DEFAULT_BIND_ADDR: &str = "127.0.0.1:8080";

/// Default storage root: the current working directory.
pub const DEFAULT_STORAGE_ROOT: &str = ".";

/// Default `public_max_age_seconds` (one hour).
pub(super) const DEFAULT_PUBLIC_MAX_AGE_SECONDS: u32 = 3600;
/// Default `public_stale_while_revalidate_seconds`.
pub(super) const DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS: u32 = 60;

/// Default grace period (seconds) for draining connections on shutdown.
pub(super) const DEFAULT_SHUTDOWN_DRAIN_SECS: u64 = 10;

/// Default wall-clock budget (seconds) for a single transform.
pub(super) const DEFAULT_TRANSFORM_DEADLINE_SECS: u64 = 30;

/// Default cap on decoded input pixel count (40 megapixels).
pub(super) const DEFAULT_MAX_INPUT_PIXELS: u64 = 40_000_000;

/// Default number of requests served per keep-alive connection.
pub(super) const DEFAULT_KEEP_ALIVE_MAX_REQUESTS: u64 = 100;

use super::http_parse::DEFAULT_MAX_UPLOAD_BODY_BYTES;

/// Callback for routing log lines to an embedding application instead of
/// stderr (see `ServerConfig::log_at`).
pub type LogHandler = Arc<dyn Fn(&str) + Send + Sync>;
214
/// Complete runtime configuration for the server.
///
/// Built either programmatically (`ServerConfig::new` plus the `with_*`
/// builders) or from `TRUSS_*` environment variables (`from_env`).
/// `Arc`-backed fields (`log_level`, `draining`, `transforms_in_flight`,
/// `presets`) are shared between clones rather than duplicated.
pub struct ServerConfig {
    /// Root directory that source paths are resolved against
    /// (canonicalized when loaded via `from_env`).
    pub storage_root: PathBuf,
    /// Optional bearer token (`TRUSS_BEARER_TOKEN`); `None` when unset.
    pub bearer_token: Option<String>,
    /// Canonical external origin (`TRUSS_PUBLIC_BASE_URL`), validated on load.
    pub public_base_url: Option<String>,
    /// Default key id used when signing URLs (`TRUSS_SIGNED_URL_KEY_ID`);
    /// set together with `signed_url_secret`.
    pub signed_url_key_id: Option<String>,
    /// Secret paired with `signed_url_key_id` (`TRUSS_SIGNED_URL_SECRET`).
    pub signed_url_secret: Option<String>,
    /// All accepted signing keys, key id -> secret: the pair above plus
    /// any entries from `TRUSS_SIGNING_KEYS` JSON.
    pub signing_keys: HashMap<String, String>,
    /// Allow insecure (non-HTTPS) remote URL sources
    /// (`TRUSS_ALLOW_INSECURE_URL_SOURCES`). NOTE(review): enforcement
    /// lives in the remote-fetch/backend code — confirm there.
    pub allow_insecure_url_sources: bool,
    /// Optional on-disk cache directory (`TRUSS_CACHE_ROOT`).
    pub cache_root: Option<PathBuf>,
    /// Cache size budget in bytes (`TRUSS_CACHE_MAX_BYTES`); defaults to
    /// 0 — presumably "no explicit cap"; confirm in the cache code.
    pub cache_max_bytes: u64,
    /// Seconds advertised as `max-age` for public responses.
    pub public_max_age_seconds: u32,
    /// Seconds advertised as `stale-while-revalidate`.
    pub public_stale_while_revalidate_seconds: u32,
    /// Disable `Accept`-header output format negotiation.
    pub disable_accept_negotiation: bool,
    /// Ordered output-format preference (`TRUSS_FORMAT_PREFERENCE`).
    pub format_preference: Vec<crate::MediaType>,
    /// Optional log sink; when `None`, log lines go to stderr.
    pub log_handler: Option<LogHandler>,
    /// Current `LogLevel` stored as its `u8` discriminant; atomic and
    /// shared across clones so it can be changed at runtime.
    pub log_level: Arc<AtomicU8>,
    /// Upper bound on simultaneously running transforms.
    pub max_concurrent_transforms: u64,
    /// Wall-clock deadline (seconds) for a single transform.
    pub transform_deadline_secs: u64,
    /// Maximum decoded input pixel count accepted.
    pub max_input_pixels: u64,
    /// Maximum accepted upload body size in bytes.
    pub max_upload_bytes: usize,
    /// Requests served per keep-alive connection before closing.
    pub keep_alive_max_requests: u64,
    /// Optional token protecting the metrics endpoint.
    pub metrics_token: Option<String>,
    /// Disable the metrics endpoint entirely.
    pub disable_metrics: bool,
    /// Health check: minimum free bytes required for the cache volume.
    pub health_cache_min_free_bytes: Option<u64>,
    /// Health check: maximum allowed process memory in bytes.
    pub health_max_memory_bytes: Option<u64>,
    /// Grace period (seconds) for draining connections on shutdown.
    pub shutdown_drain_secs: u64,
    /// Set while the server is shutting down; shared across clones.
    pub draining: Arc<AtomicBool>,
    /// Extra response headers (`TRUSS_RESPONSE_HEADERS`), as (name, value).
    pub custom_response_headers: Vec<(String, String)>,
    /// Maximum bytes fetched for a remote source image.
    pub max_source_bytes: u64,
    /// Maximum bytes fetched for a remote watermark image.
    pub max_watermark_bytes: u64,
    /// Maximum redirects followed when fetching remote resources.
    pub max_remote_redirects: usize,
    /// Whether response compression is enabled.
    pub enable_compression: bool,
    /// Compression level, 0-9.
    pub compression_level: u32,
    /// Live count of in-flight transforms; shared across clones.
    pub transforms_in_flight: Arc<AtomicU64>,
    /// Named transform presets; `RwLock` permits runtime reload.
    pub presets: Arc<std::sync::RwLock<HashMap<String, TransformOptionsPayload>>>,
    /// File the presets were loaded from (`TRUSS_PRESETS_FILE`), if any.
    pub presets_file_path: Option<PathBuf>,
    /// Optional request rate limiter (`TRUSS_RATE_LIMIT_RPS`/`_BURST`).
    pub rate_limiter: Option<Arc<super::rate_limit::RateLimiter>>,
    /// Timeout (seconds) for remote-storage downloads.
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    pub storage_timeout_secs: u64,
    /// Which storage backend is active.
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    pub storage_backend: StorageBackend,
    /// S3 client context; populated when the S3 backend is selected.
    #[cfg(feature = "s3")]
    pub s3_context: Option<Arc<s3::S3Context>>,
    /// GCS client context; populated when the GCS backend is selected.
    #[cfg(feature = "gcs")]
    pub gcs_context: Option<Arc<gcs::GcsContext>>,
    /// Azure client context; populated when the Azure backend is selected.
    #[cfg(feature = "azure")]
    pub azure_context: Option<Arc<azure::AzureContext>>,
}
433
/// Hand-written `Clone`: shared-state fields (`log_level`, `draining`,
/// `transforms_in_flight`, `presets`) are cloned with `Arc::clone`, so every
/// clone observes the same live values; the remaining fields are copied.
/// NOTE(review): every field is `Clone`, so a derive would likely behave the
/// same — confirm before replacing with `#[derive(Clone)]`.
impl Clone for ServerConfig {
    fn clone(&self) -> Self {
        Self {
            storage_root: self.storage_root.clone(),
            bearer_token: self.bearer_token.clone(),
            public_base_url: self.public_base_url.clone(),
            signed_url_key_id: self.signed_url_key_id.clone(),
            signed_url_secret: self.signed_url_secret.clone(),
            signing_keys: self.signing_keys.clone(),
            allow_insecure_url_sources: self.allow_insecure_url_sources,
            cache_root: self.cache_root.clone(),
            cache_max_bytes: self.cache_max_bytes,
            public_max_age_seconds: self.public_max_age_seconds,
            public_stale_while_revalidate_seconds: self.public_stale_while_revalidate_seconds,
            disable_accept_negotiation: self.disable_accept_negotiation,
            format_preference: self.format_preference.clone(),
            log_handler: self.log_handler.clone(),
            // Shared, not duplicated: clones see runtime level changes.
            log_level: Arc::clone(&self.log_level),
            max_concurrent_transforms: self.max_concurrent_transforms,
            transform_deadline_secs: self.transform_deadline_secs,
            max_input_pixels: self.max_input_pixels,
            max_upload_bytes: self.max_upload_bytes,
            keep_alive_max_requests: self.keep_alive_max_requests,
            metrics_token: self.metrics_token.clone(),
            disable_metrics: self.disable_metrics,
            health_cache_min_free_bytes: self.health_cache_min_free_bytes,
            health_max_memory_bytes: self.health_max_memory_bytes,
            shutdown_drain_secs: self.shutdown_drain_secs,
            draining: Arc::clone(&self.draining),
            custom_response_headers: self.custom_response_headers.clone(),
            max_source_bytes: self.max_source_bytes,
            max_watermark_bytes: self.max_watermark_bytes,
            max_remote_redirects: self.max_remote_redirects,
            enable_compression: self.enable_compression,
            compression_level: self.compression_level,
            transforms_in_flight: Arc::clone(&self.transforms_in_flight),
            presets: Arc::clone(&self.presets),
            presets_file_path: self.presets_file_path.clone(),
            rate_limiter: self.rate_limiter.as_ref().map(Arc::clone),
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_timeout_secs: self.storage_timeout_secs,
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_backend: self.storage_backend,
            #[cfg(feature = "s3")]
            s3_context: self.s3_context.clone(),
            #[cfg(feature = "gcs")]
            gcs_context: self.gcs_context.clone(),
            #[cfg(feature = "azure")]
            azure_context: self.azure_context.clone(),
        }
    }
}
486
/// Log-safe `Debug`: secret-bearing fields (`bearer_token`,
/// `signed_url_secret`, `metrics_token`) are redacted, `signing_keys`
/// shows only its key ids, and bulky or runtime-state fields (custom
/// headers, presets, handlers, contexts) are summarized; a few fields
/// (remote fetch limits, `draining`, `transforms_in_flight`) are omitted.
impl fmt::Debug for ServerConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("ServerConfig");
        d.field("storage_root", &self.storage_root)
            .field(
                "bearer_token",
                &self.bearer_token.as_ref().map(|_| "[REDACTED]"),
            )
            .field("public_base_url", &self.public_base_url)
            .field("signed_url_key_id", &self.signed_url_key_id)
            .field(
                "signed_url_secret",
                &self.signed_url_secret.as_ref().map(|_| "[REDACTED]"),
            )
            .field(
                // Key ids only; secrets never appear.
                "signing_keys",
                &self.signing_keys.keys().collect::<Vec<_>>(),
            )
            .field(
                "allow_insecure_url_sources",
                &self.allow_insecure_url_sources,
            )
            .field("cache_root", &self.cache_root)
            .field("cache_max_bytes", &self.cache_max_bytes)
            .field("public_max_age_seconds", &self.public_max_age_seconds)
            .field(
                "public_stale_while_revalidate_seconds",
                &self.public_stale_while_revalidate_seconds,
            )
            .field(
                "disable_accept_negotiation",
                &self.disable_accept_negotiation,
            )
            .field("format_preference", &self.format_preference)
            .field("log_handler", &self.log_handler.as_ref().map(|_| ".."))
            .field("log_level", &self.current_log_level())
            .field("max_concurrent_transforms", &self.max_concurrent_transforms)
            .field("transform_deadline_secs", &self.transform_deadline_secs)
            .field("max_input_pixels", &self.max_input_pixels)
            .field("max_upload_bytes", &self.max_upload_bytes)
            .field("keep_alive_max_requests", &self.keep_alive_max_requests)
            .field(
                "metrics_token",
                &self.metrics_token.as_ref().map(|_| "[REDACTED]"),
            )
            .field("disable_metrics", &self.disable_metrics)
            .field(
                "health_cache_min_free_bytes",
                &self.health_cache_min_free_bytes,
            )
            .field("health_max_memory_bytes", &self.health_max_memory_bytes)
            .field("shutdown_drain_secs", &self.shutdown_drain_secs)
            .field(
                // Only the count; header values may be sensitive.
                "custom_response_headers",
                &self.custom_response_headers.len(),
            )
            .field("enable_compression", &self.enable_compression)
            .field("compression_level", &self.compression_level)
            .field(
                // Preset names only; an unreadable (poisoned) lock shows [].
                "presets",
                &self
                    .presets
                    .read()
                    .map(|p| p.keys().cloned().collect::<Vec<_>>())
                    .unwrap_or_default(),
            )
            .field("presets_file_path", &self.presets_file_path)
            .field("rate_limiter", &self.rate_limiter.is_some());
        #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
        {
            d.field("storage_backend", &self.storage_backend);
        }
        #[cfg(feature = "s3")]
        {
            d.field("s3_context", &self.s3_context.as_ref().map(|_| ".."));
        }
        #[cfg(feature = "gcs")]
        {
            d.field("gcs_context", &self.gcs_context.as_ref().map(|_| ".."));
        }
        #[cfg(feature = "azure")]
        {
            d.field("azure_context", &self.azure_context.as_ref().map(|_| ".."));
        }
        d.finish()
    }
}
574
/// Structural equality over the configuration values.
///
/// Deliberately ignores runtime-state and callback fields (`log_handler`,
/// `log_level`, `draining`, `transforms_in_flight`, and — on remote
/// builds — `storage_timeout_secs`); the rate limiter is compared only by
/// presence, and storage backend/contexts via `cfg_storage_eq`.
impl PartialEq for ServerConfig {
    fn eq(&self, other: &Self) -> bool {
        self.storage_root == other.storage_root
            && self.bearer_token == other.bearer_token
            && self.public_base_url == other.public_base_url
            && self.signed_url_key_id == other.signed_url_key_id
            && self.signed_url_secret == other.signed_url_secret
            && self.signing_keys == other.signing_keys
            && self.allow_insecure_url_sources == other.allow_insecure_url_sources
            && self.cache_root == other.cache_root
            && self.cache_max_bytes == other.cache_max_bytes
            && self.public_max_age_seconds == other.public_max_age_seconds
            && self.public_stale_while_revalidate_seconds
                == other.public_stale_while_revalidate_seconds
            && self.disable_accept_negotiation == other.disable_accept_negotiation
            && self.format_preference == other.format_preference
            && self.max_concurrent_transforms == other.max_concurrent_transforms
            && self.transform_deadline_secs == other.transform_deadline_secs
            && self.max_input_pixels == other.max_input_pixels
            && self.max_upload_bytes == other.max_upload_bytes
            && self.keep_alive_max_requests == other.keep_alive_max_requests
            && self.metrics_token == other.metrics_token
            && self.disable_metrics == other.disable_metrics
            && self.health_cache_min_free_bytes == other.health_cache_min_free_bytes
            && self.health_max_memory_bytes == other.health_max_memory_bytes
            && self.shutdown_drain_secs == other.shutdown_drain_secs
            && self.custom_response_headers == other.custom_response_headers
            && self.max_source_bytes == other.max_source_bytes
            && self.max_watermark_bytes == other.max_watermark_bytes
            && self.max_remote_redirects == other.max_remote_redirects
            && self.enable_compression == other.enable_compression
            && self.compression_level == other.compression_level
            // Panics if either preset lock is poisoned — NOTE(review):
            // confirm callers accept that, unlike Debug which degrades.
            && *self.presets.read().unwrap() == *other.presets.read().unwrap()
            && self.presets_file_path == other.presets_file_path
            && self.rate_limiter.is_some() == other.rate_limiter.is_some()
            && cfg_storage_eq(self, other)
    }
}
613
/// Feature-gated portion of `ServerConfig` equality: the selected backend
/// must match, and each configured context is compared by its cheap
/// identifying fields (default bucket/container and endpoint URL) rather
/// than by the full client state. Parameters are underscore-prefixed
/// because they go unused when no remote feature is compiled in.
fn cfg_storage_eq(_this: &ServerConfig, _other: &ServerConfig) -> bool {
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    {
        if _this.storage_backend != _other.storage_backend {
            return false;
        }
    }
    #[cfg(feature = "s3")]
    {
        if _this
            .s3_context
            .as_ref()
            .map(|c| (&c.default_bucket, &c.endpoint_url))
            != _other
                .s3_context
                .as_ref()
                .map(|c| (&c.default_bucket, &c.endpoint_url))
        {
            return false;
        }
    }
    #[cfg(feature = "gcs")]
    {
        if _this
            .gcs_context
            .as_ref()
            .map(|c| (&c.default_bucket, &c.endpoint_url))
            != _other
                .gcs_context
                .as_ref()
                .map(|c| (&c.default_bucket, &c.endpoint_url))
        {
            return false;
        }
    }
    #[cfg(feature = "azure")]
    {
        if _this
            .azure_context
            .as_ref()
            .map(|c| (&c.default_container, &c.endpoint_url))
            != _other
                .azure_context
                .as_ref()
                .map(|c| (&c.default_container, &c.endpoint_url))
        {
            return false;
        }
    }
    // Trivially equal when no remote-storage feature is compiled in.
    true
}
665
// Total equality marker. NOTE(review): soundness assumes
// `TransformOptionsPayload`'s `PartialEq` (compared via `presets`) is a
// true equivalence relation — confirm it holds if that type gains floats.
impl Eq for ServerConfig {}
667
668impl ServerConfig {
    /// Creates a configuration with the given storage root and optional
    /// bearer token; every other setting starts at its default (no cache,
    /// no signing keys, filesystem backend, `Info` log level, compression
    /// enabled at level 1). Use the `with_*` builders to customize.
    pub fn new(storage_root: PathBuf, bearer_token: Option<String>) -> Self {
        Self {
            storage_root,
            bearer_token,
            public_base_url: None,
            signed_url_key_id: None,
            signed_url_secret: None,
            signing_keys: HashMap::new(),
            allow_insecure_url_sources: false,
            cache_root: None,
            cache_max_bytes: 0,
            public_max_age_seconds: DEFAULT_PUBLIC_MAX_AGE_SECONDS,
            public_stale_while_revalidate_seconds: DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS,
            disable_accept_negotiation: false,
            format_preference: Vec::new(),
            log_handler: None,
            log_level: Arc::new(AtomicU8::new(LogLevel::Info as u8)),
            max_concurrent_transforms: DEFAULT_MAX_CONCURRENT_TRANSFORMS,
            transform_deadline_secs: DEFAULT_TRANSFORM_DEADLINE_SECS,
            max_input_pixels: DEFAULT_MAX_INPUT_PIXELS,
            max_upload_bytes: DEFAULT_MAX_UPLOAD_BODY_BYTES,
            keep_alive_max_requests: DEFAULT_KEEP_ALIVE_MAX_REQUESTS,
            metrics_token: None,
            disable_metrics: false,
            health_cache_min_free_bytes: None,
            health_max_memory_bytes: None,
            shutdown_drain_secs: DEFAULT_SHUTDOWN_DRAIN_SECS,
            draining: Arc::new(AtomicBool::new(false)),
            custom_response_headers: Vec::new(),
            max_source_bytes: super::remote::MAX_SOURCE_BYTES,
            max_watermark_bytes: super::remote::MAX_WATERMARK_BYTES,
            max_remote_redirects: super::remote::MAX_REMOTE_REDIRECTS,
            enable_compression: true,
            compression_level: 1,
            transforms_in_flight: Arc::new(AtomicU64::new(0)),
            presets: Arc::new(std::sync::RwLock::new(HashMap::new())),
            presets_file_path: None,
            rate_limiter: None,
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_timeout_secs: STORAGE_DOWNLOAD_TIMEOUT_SECS,
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_backend: StorageBackend::Filesystem,
            #[cfg(feature = "s3")]
            s3_context: None,
            #[cfg(feature = "gcs")]
            gcs_context: None,
            #[cfg(feature = "azure")]
            azure_context: None,
        }
    }
733
734 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
735 pub(super) fn storage_backend_label(&self) -> StorageBackendLabel {
736 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
737 {
738 match self.storage_backend {
739 StorageBackend::Filesystem => StorageBackendLabel::Filesystem,
740 #[cfg(feature = "s3")]
741 StorageBackend::S3 => StorageBackendLabel::S3,
742 #[cfg(feature = "gcs")]
743 StorageBackend::Gcs => StorageBackendLabel::Gcs,
744 #[cfg(feature = "azure")]
745 StorageBackend::Azure => StorageBackendLabel::Azure,
746 }
747 }
748 #[cfg(not(any(feature = "s3", feature = "gcs", feature = "azure")))]
749 {
750 StorageBackendLabel::Filesystem
751 }
752 }
753
754 pub(super) fn current_log_level(&self) -> LogLevel {
756 LogLevel::from_u8(self.log_level.load(Ordering::Relaxed))
757 }
758
759 pub(super) fn log_at(&self, level: LogLevel, msg: &str) {
762 if level > self.current_log_level() {
763 return;
764 }
765 if let Some(handler) = &self.log_handler {
766 handler(msg);
767 } else {
768 stderr_write(msg);
769 }
770 }
771
772 pub(super) fn log(&self, msg: &str) {
776 self.log_at(LogLevel::Info, msg);
777 }
778
779 #[allow(dead_code)]
781 pub(super) fn log_error(&self, msg: &str) {
782 self.log_at(LogLevel::Error, msg);
783 }
784
785 pub(super) fn log_warn(&self, msg: &str) {
787 self.log_at(LogLevel::Warn, msg);
788 }
789
790 #[allow(dead_code)]
792 pub(super) fn log_debug(&self, msg: &str) {
793 self.log_at(LogLevel::Debug, msg);
794 }
795
796 pub fn with_signed_url_credentials(
814 mut self,
815 key_id: impl Into<String>,
816 secret: impl Into<String>,
817 ) -> Self {
818 let key_id = key_id.into();
819 let secret = secret.into();
820 self.signing_keys.insert(key_id.clone(), secret.clone());
821 self.signed_url_key_id = Some(key_id);
822 self.signed_url_secret = Some(secret);
823 self
824 }
825
826 pub fn with_signing_keys(mut self, keys: HashMap<String, String>) -> Self {
832 self.signing_keys.extend(keys);
833 self
834 }
835
836 pub fn with_insecure_url_sources(mut self, allow_insecure_url_sources: bool) -> Self {
853 self.allow_insecure_url_sources = allow_insecure_url_sources;
854 self
855 }
856
857 pub fn with_cache_root(mut self, cache_root: impl Into<PathBuf>) -> Self {
873 self.cache_root = Some(cache_root.into());
874 self
875 }
876
877 pub fn with_cache_max_bytes(mut self, max_bytes: u64) -> Self {
894 self.cache_max_bytes = max_bytes;
895 self
896 }
897
    /// Selects the S3 backend and installs its client context.
    #[cfg(feature = "s3")]
    pub fn with_s3_context(mut self, context: s3::S3Context) -> Self {
        self.storage_backend = StorageBackend::S3;
        self.s3_context = Some(Arc::new(context));
        self
    }

    /// Selects the GCS backend and installs its client context.
    #[cfg(feature = "gcs")]
    pub fn with_gcs_context(mut self, context: gcs::GcsContext) -> Self {
        self.storage_backend = StorageBackend::Gcs;
        self.gcs_context = Some(Arc::new(context));
        self
    }

    /// Selects the Azure backend and installs its client context.
    #[cfg(feature = "azure")]
    pub fn with_azure_context(mut self, context: azure::AzureContext) -> Self {
        self.storage_backend = StorageBackend::Azure;
        self.azure_context = Some(Arc::new(context));
        self
    }

    /// Replaces the preset table wholesale. NOTE(review): this installs a
    /// fresh `Arc`, so clones made *before* this call keep the old table.
    pub fn with_presets(mut self, presets: HashMap<String, TransformOptionsPayload>) -> Self {
        self.presets = Arc::new(std::sync::RwLock::new(presets));
        self
    }
927
928 pub fn from_env() -> io::Result<Self> {
1012 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
1013 let storage_backend = match env::var("TRUSS_STORAGE_BACKEND")
1014 .ok()
1015 .filter(|v| !v.is_empty())
1016 {
1017 Some(value) => StorageBackend::parse(&value)
1018 .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?,
1019 None => StorageBackend::Filesystem,
1020 };
1021
1022 let storage_root =
1023 env::var("TRUSS_STORAGE_ROOT").unwrap_or_else(|_| DEFAULT_STORAGE_ROOT.to_string());
1024 let storage_root = PathBuf::from(storage_root).canonicalize()?;
1025 let bearer_token = env::var("TRUSS_BEARER_TOKEN")
1026 .ok()
1027 .filter(|value| !value.is_empty());
1028 let public_base_url = env::var("TRUSS_PUBLIC_BASE_URL")
1029 .ok()
1030 .filter(|value| !value.is_empty())
1031 .map(validate_public_base_url)
1032 .transpose()?;
1033 let signed_url_key_id = env::var("TRUSS_SIGNED_URL_KEY_ID")
1034 .ok()
1035 .filter(|value| !value.is_empty());
1036 let signed_url_secret = env::var("TRUSS_SIGNED_URL_SECRET")
1037 .ok()
1038 .filter(|value| !value.is_empty());
1039
1040 if signed_url_key_id.is_some() != signed_url_secret.is_some() {
1041 return Err(io::Error::new(
1042 io::ErrorKind::InvalidInput,
1043 "TRUSS_SIGNED_URL_KEY_ID and TRUSS_SIGNED_URL_SECRET must be set together",
1044 ));
1045 }
1046
1047 let mut signing_keys = HashMap::new();
1048 if let (Some(kid), Some(sec)) = (&signed_url_key_id, &signed_url_secret) {
1049 signing_keys.insert(kid.clone(), sec.clone());
1050 }
1051 if let Ok(json) = env::var("TRUSS_SIGNING_KEYS")
1052 && !json.is_empty()
1053 {
1054 let extra: HashMap<String, String> = serde_json::from_str(&json).map_err(|e| {
1055 io::Error::new(
1056 io::ErrorKind::InvalidInput,
1057 format!("TRUSS_SIGNING_KEYS must be valid JSON: {e}"),
1058 )
1059 })?;
1060 for (kid, sec) in &extra {
1061 if kid.is_empty() || sec.is_empty() {
1062 return Err(io::Error::new(
1063 io::ErrorKind::InvalidInput,
1064 "TRUSS_SIGNING_KEYS must not contain empty key IDs or secrets",
1065 ));
1066 }
1067 }
1068 signing_keys.extend(extra);
1069 }
1070
1071 if !signing_keys.is_empty() && public_base_url.is_none() {
1072 eprintln!(
1073 "truss: warning: signing keys are configured but TRUSS_PUBLIC_BASE_URL is not. \
1074 Behind a reverse proxy or CDN the Host header may differ from the externally \
1075 visible authority, causing signed URL verification to fail. Consider setting \
1076 TRUSS_PUBLIC_BASE_URL to the canonical external origin."
1077 );
1078 }
1079
1080 let cache_root = env::var("TRUSS_CACHE_ROOT")
1081 .ok()
1082 .filter(|value| !value.is_empty())
1083 .map(PathBuf::from);
1084
1085 let cache_max_bytes =
1086 parse_env_u64_ranged("TRUSS_CACHE_MAX_BYTES", 0, u64::MAX)?.unwrap_or(0);
1087
1088 let public_max_age_seconds = parse_optional_env_u32("TRUSS_PUBLIC_MAX_AGE")?
1089 .unwrap_or(DEFAULT_PUBLIC_MAX_AGE_SECONDS);
1090 let public_stale_while_revalidate_seconds =
1091 parse_optional_env_u32("TRUSS_PUBLIC_STALE_WHILE_REVALIDATE")?
1092 .unwrap_or(DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS);
1093
1094 let allow_insecure_url_sources = env_flag("TRUSS_ALLOW_INSECURE_URL_SOURCES");
1095
1096 let max_concurrent_transforms =
1097 parse_env_u64_ranged("TRUSS_MAX_CONCURRENT_TRANSFORMS", 1, 1024)?
1098 .unwrap_or(DEFAULT_MAX_CONCURRENT_TRANSFORMS);
1099
1100 let transform_deadline_secs =
1101 parse_env_u64_ranged("TRUSS_TRANSFORM_DEADLINE_SECS", 1, 300)?
1102 .unwrap_or(DEFAULT_TRANSFORM_DEADLINE_SECS);
1103
1104 let max_input_pixels =
1105 parse_env_u64_ranged("TRUSS_MAX_INPUT_PIXELS", 1, crate::MAX_DECODED_PIXELS)?
1106 .unwrap_or(DEFAULT_MAX_INPUT_PIXELS);
1107
1108 let max_upload_bytes =
1109 parse_env_u64_ranged("TRUSS_MAX_UPLOAD_BYTES", 1, 10 * 1024 * 1024 * 1024)?
1110 .unwrap_or(DEFAULT_MAX_UPLOAD_BODY_BYTES as u64) as usize;
1111
1112 let keep_alive_max_requests =
1113 parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000)?
1114 .unwrap_or(DEFAULT_KEEP_ALIVE_MAX_REQUESTS);
1115
1116 let max_source_bytes =
1117 parse_env_u64_ranged("TRUSS_MAX_SOURCE_BYTES", 1, 10 * 1024 * 1024 * 1024)?
1118 .unwrap_or(super::remote::MAX_SOURCE_BYTES);
1119
1120 let max_watermark_bytes =
1121 parse_env_u64_ranged("TRUSS_MAX_WATERMARK_BYTES", 1, 1024 * 1024 * 1024)?
1122 .unwrap_or(super::remote::MAX_WATERMARK_BYTES);
1123
1124 let max_remote_redirects = parse_env_u64_ranged("TRUSS_MAX_REMOTE_REDIRECTS", 0, 20)?
1125 .unwrap_or(super::remote::MAX_REMOTE_REDIRECTS as u64)
1126 as usize;
1127
1128 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
1129 let storage_timeout_secs = parse_env_u64_ranged("TRUSS_STORAGE_TIMEOUT_SECS", 1, 300)?
1130 .unwrap_or(STORAGE_DOWNLOAD_TIMEOUT_SECS);
1131
1132 #[cfg(feature = "s3")]
1133 let s3_context = if storage_backend == StorageBackend::S3 {
1134 let bucket = env::var("TRUSS_S3_BUCKET")
1135 .ok()
1136 .filter(|v| !v.is_empty())
1137 .ok_or_else(|| {
1138 io::Error::new(
1139 io::ErrorKind::InvalidInput,
1140 "TRUSS_S3_BUCKET is required when TRUSS_STORAGE_BACKEND=s3",
1141 )
1142 })?;
1143 Some(Arc::new(s3::build_s3_context(
1144 bucket,
1145 allow_insecure_url_sources,
1146 )?))
1147 } else {
1148 None
1149 };
1150
1151 #[cfg(feature = "gcs")]
1152 let gcs_context = if storage_backend == StorageBackend::Gcs {
1153 let bucket = env::var("TRUSS_GCS_BUCKET")
1154 .ok()
1155 .filter(|v| !v.is_empty())
1156 .ok_or_else(|| {
1157 io::Error::new(
1158 io::ErrorKind::InvalidInput,
1159 "TRUSS_GCS_BUCKET is required when TRUSS_STORAGE_BACKEND=gcs",
1160 )
1161 })?;
1162 Some(Arc::new(gcs::build_gcs_context(
1163 bucket,
1164 allow_insecure_url_sources,
1165 )?))
1166 } else {
1167 if env::var("TRUSS_GCS_BUCKET")
1168 .ok()
1169 .filter(|v| !v.is_empty())
1170 .is_some()
1171 {
1172 eprintln!(
1173 "truss: warning: TRUSS_GCS_BUCKET is set but TRUSS_STORAGE_BACKEND is not \
1174 `gcs`. The GCS bucket will be ignored. Set TRUSS_STORAGE_BACKEND=gcs to \
1175 enable the GCS backend."
1176 );
1177 }
1178 None
1179 };
1180
1181 #[cfg(feature = "azure")]
1182 let azure_context = if storage_backend == StorageBackend::Azure {
1183 let container = env::var("TRUSS_AZURE_CONTAINER")
1184 .ok()
1185 .filter(|v| !v.is_empty())
1186 .ok_or_else(|| {
1187 io::Error::new(
1188 io::ErrorKind::InvalidInput,
1189 "TRUSS_AZURE_CONTAINER is required when TRUSS_STORAGE_BACKEND=azure",
1190 )
1191 })?;
1192 Some(Arc::new(azure::build_azure_context(
1193 container,
1194 allow_insecure_url_sources,
1195 )?))
1196 } else {
1197 if env::var("TRUSS_AZURE_CONTAINER")
1198 .ok()
1199 .filter(|v| !v.is_empty())
1200 .is_some()
1201 {
1202 eprintln!(
1203 "truss: warning: TRUSS_AZURE_CONTAINER is set but TRUSS_STORAGE_BACKEND is not \
1204 `azure`. The Azure container will be ignored. Set TRUSS_STORAGE_BACKEND=azure to \
1205 enable the Azure backend."
1206 );
1207 }
1208 None
1209 };
1210
1211 let metrics_token = env::var("TRUSS_METRICS_TOKEN")
1212 .ok()
1213 .filter(|value| !value.is_empty());
1214 let disable_metrics = env_flag("TRUSS_DISABLE_METRICS");
1215
1216 let health_cache_min_free_bytes =
1217 parse_env_u64_ranged("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", 1, u64::MAX)?;
1218 let health_max_memory_bytes =
1219 parse_env_u64_ranged("TRUSS_HEALTH_MAX_MEMORY_BYTES", 1, u64::MAX)?;
1220
1221 let (presets, presets_file_path) = parse_presets_from_env()?;
1222
1223 let shutdown_drain_secs = parse_env_u64_ranged("TRUSS_SHUTDOWN_DRAIN_SECS", 0, 300)?
1224 .unwrap_or(DEFAULT_SHUTDOWN_DRAIN_SECS);
1225
1226 let custom_response_headers = parse_response_headers_from_env()?;
1227
1228 let enable_compression = !env_flag("TRUSS_DISABLE_COMPRESSION");
1229 let compression_level =
1230 parse_env_u64_ranged("TRUSS_COMPRESSION_LEVEL", 0, 9)?.unwrap_or(1) as u32;
1231
1232 let log_level = match env::var("TRUSS_LOG_LEVEL").ok().filter(|v| !v.is_empty()) {
1233 Some(val) => val
1234 .parse::<LogLevel>()
1235 .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?,
1236 None => LogLevel::Info,
1237 };
1238
1239 let format_preference = parse_format_preference_from_env()?;
1240
1241 let rate_limiter = {
1242 let rps = parse_env_u64_ranged("TRUSS_RATE_LIMIT_RPS", 0, 100_000)?.unwrap_or(0);
1243 if rps > 0 {
1244 let burst =
1245 parse_env_u64_ranged("TRUSS_RATE_LIMIT_BURST", 1, 100_000)?.unwrap_or(rps);
1246 Some(Arc::new(super::rate_limit::RateLimiter::new(
1247 rps as f64,
1248 burst as f64,
1249 )))
1250 } else {
1251 None
1252 }
1253 };
1254
1255 Ok(Self {
1256 storage_root,
1257 bearer_token,
1258 public_base_url,
1259 signed_url_key_id,
1260 signed_url_secret,
1261 signing_keys,
1262 allow_insecure_url_sources,
1263 cache_root,
1264 cache_max_bytes,
1265 public_max_age_seconds,
1266 public_stale_while_revalidate_seconds,
1267 disable_accept_negotiation: env_flag("TRUSS_DISABLE_ACCEPT_NEGOTIATION"),
1268 format_preference,
1269 log_handler: None,
1270 log_level: Arc::new(AtomicU8::new(log_level as u8)),
1271 max_concurrent_transforms,
1272 transform_deadline_secs,
1273 max_input_pixels,
1274 max_upload_bytes,
1275 keep_alive_max_requests,
1276 metrics_token,
1277 disable_metrics,
1278 health_cache_min_free_bytes,
1279 health_max_memory_bytes,
1280 shutdown_drain_secs,
1281 draining: Arc::new(AtomicBool::new(false)),
1282 custom_response_headers,
1283 max_source_bytes,
1284 max_watermark_bytes,
1285 max_remote_redirects,
1286 enable_compression,
1287 compression_level,
1288 transforms_in_flight: Arc::new(AtomicU64::new(0)),
1289 presets: Arc::new(std::sync::RwLock::new(presets)),
1290 presets_file_path,
1291 rate_limiter,
1292 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
1293 storage_timeout_secs,
1294 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
1295 storage_backend,
1296 #[cfg(feature = "s3")]
1297 s3_context,
1298 #[cfg(feature = "gcs")]
1299 gcs_context,
1300 #[cfg(feature = "azure")]
1301 azure_context,
1302 })
1303 }
1304}
1305
1306pub(super) fn parse_env_u64_ranged(name: &str, min: u64, max: u64) -> io::Result<Option<u64>> {
1311 match env::var(name).ok().filter(|v| !v.is_empty()) {
1312 Some(value) => {
1313 let n: u64 = value.parse().map_err(|_| {
1314 io::Error::new(
1315 io::ErrorKind::InvalidInput,
1316 format!("{name} must be a positive integer"),
1317 )
1318 })?;
1319 if n < min || n > max {
1320 return Err(io::Error::new(
1321 io::ErrorKind::InvalidInput,
1322 format!("{name} must be between {min} and {max}"),
1323 ));
1324 }
1325 Ok(Some(n))
1326 }
1327 None => Ok(None),
1328 }
1329}
1330
1331pub(super) fn parse_format_preference_from_env() -> io::Result<Vec<crate::MediaType>> {
1338 let value = match env::var("TRUSS_FORMAT_PREFERENCE")
1339 .ok()
1340 .filter(|v| !v.is_empty())
1341 {
1342 Some(v) => v,
1343 None => return Ok(Vec::new()),
1344 };
1345
1346 let mut formats = Vec::new();
1347 for segment in value.split(',') {
1348 let name = segment.trim();
1349 if name.is_empty() {
1350 continue;
1351 }
1352 let media_type: crate::MediaType = name.parse().map_err(|e: String| {
1353 io::Error::new(
1354 io::ErrorKind::InvalidInput,
1355 format!("TRUSS_FORMAT_PREFERENCE: {e}"),
1356 )
1357 })?;
1358 if formats.contains(&media_type) {
1359 return Err(io::Error::new(
1360 io::ErrorKind::InvalidInput,
1361 format!("TRUSS_FORMAT_PREFERENCE: duplicate format `{name}`"),
1362 ));
1363 }
1364 formats.push(media_type);
1365 }
1366 Ok(formats)
1367}
1368
1369pub(super) fn env_flag(name: &str) -> bool {
1370 env::var(name)
1371 .map(|value| {
1372 matches!(
1373 value.as_str(),
1374 "1" | "true" | "TRUE" | "yes" | "YES" | "on" | "ON"
1375 )
1376 })
1377 .unwrap_or(false)
1378}
1379
1380pub(super) fn parse_optional_env_u32(name: &str) -> io::Result<Option<u32>> {
1381 match env::var(name) {
1382 Ok(value) if !value.is_empty() => value.parse::<u32>().map(Some).map_err(|_| {
1383 io::Error::new(
1384 io::ErrorKind::InvalidInput,
1385 format!("{name} must be a non-negative integer"),
1386 )
1387 }),
1388 _ => Ok(None),
1389 }
1390}
1391
/// Loads transform presets from the environment.
///
/// `TRUSS_PRESETS_FILE` (path to a JSON file) takes precedence over inline
/// `TRUSS_PRESETS` JSON; when neither is set the result is empty. Returns
/// the parsed preset map plus the file path (when a file was used) so the
/// server can re-read it later (see `parse_presets_file`).
pub(super) fn parse_presets_from_env()
-> io::Result<(HashMap<String, TransformOptionsPayload>, Option<PathBuf>)> {
    // `source` names the variable that supplied the JSON, for error text.
    let (json_str, source, file_path) = match env::var("TRUSS_PRESETS_FILE")
        .ok()
        .filter(|v| !v.is_empty())
    {
        Some(path) => {
            let content = std::fs::read_to_string(&path).map_err(|e| {
                io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("failed to read TRUSS_PRESETS_FILE `{path}`: {e}"),
                )
            })?;
            let pb = PathBuf::from(&path);
            (content, format!("TRUSS_PRESETS_FILE `{path}`"), Some(pb))
        }
        None => match env::var("TRUSS_PRESETS").ok().filter(|v| !v.is_empty()) {
            Some(value) => (value, "TRUSS_PRESETS".to_string(), None),
            None => return Ok((HashMap::new(), None)),
        },
    };

    let presets = serde_json::from_str::<HashMap<String, TransformOptionsPayload>>(&json_str)
        .map_err(|e| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("{source} must be valid JSON: {e}"),
            )
        })?;
    Ok((presets, file_path))
}
1425
1426pub(super) fn parse_presets_file(
1428 path: &std::path::Path,
1429) -> io::Result<HashMap<String, TransformOptionsPayload>> {
1430 let content = std::fs::read_to_string(path)?;
1431 serde_json::from_str::<HashMap<String, TransformOptionsPayload>>(&content).map_err(|e| {
1432 io::Error::new(
1433 io::ErrorKind::InvalidData,
1434 format!("invalid preset JSON in `{}`: {e}", path.display()),
1435 )
1436 })
1437}
1438
1439fn parse_response_headers_from_env() -> io::Result<Vec<(String, String)>> {
1443 let raw = match env::var("TRUSS_RESPONSE_HEADERS")
1444 .ok()
1445 .filter(|v| !v.is_empty())
1446 {
1447 Some(value) => value,
1448 None => return Ok(Vec::new()),
1449 };
1450
1451 let map: HashMap<String, String> = serde_json::from_str(&raw).map_err(|e| {
1452 io::Error::new(
1453 io::ErrorKind::InvalidInput,
1454 format!("TRUSS_RESPONSE_HEADERS must be a JSON object: {e}"),
1455 )
1456 })?;
1457
1458 let mut headers = Vec::with_capacity(map.len());
1459 for (name, value) in map {
1460 validate_header_name(&name)?;
1461 reject_denied_header(&name)?;
1462 validate_header_value(&name, &value)?;
1463 headers.push((name, value));
1464 }
1465 headers.sort_by(|a, b| a.0.cmp(&b.0));
1467 Ok(headers)
1468}
1469
/// Validates an HTTP header name from `TRUSS_RESPONSE_HEADERS`.
///
/// The name must be non-empty and consist only of ASCII alphanumerics
/// and the token punctuation set `! # $ % & ' * + - . ^ _ ` | ~`;
/// anything else (including spaces and non-ASCII bytes) is rejected
/// with `InvalidInput`.
fn validate_header_name(name: &str) -> io::Result<()> {
    if name.is_empty() {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "TRUSS_RESPONSE_HEADERS: header name must not be empty",
        ));
    }
    // Token characters allowed in a header (field) name.
    let is_token_byte =
        |b: u8| b.is_ascii_alphanumeric() || b"!#$%&'*+-.^_`|~".contains(&b);
    if name.bytes().all(is_token_byte) {
        Ok(())
    } else {
        Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("TRUSS_RESPONSE_HEADERS: invalid character in header name `{name}`"),
        ))
    }
}
1509
/// Validates an HTTP header value from `TRUSS_RESPONSE_HEADERS`.
///
/// Only printable ASCII (0x20..=0x7E) and horizontal tab are allowed;
/// control bytes and non-ASCII bytes are rejected with `InvalidInput`,
/// naming the offending header in the error message.
fn validate_header_value(name: &str, value: &str) -> io::Result<()> {
    let all_bytes_valid = value
        .bytes()
        .all(|b| b == b'\t' || (0x20..=0x7E).contains(&b));
    if all_bytes_valid {
        Ok(())
    } else {
        Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("TRUSS_RESPONSE_HEADERS: invalid character in value for header `{name}`"),
        ))
    }
}
1523
/// Rejects header names that would interfere with response framing or
/// hop-by-hop behavior (compared case-insensitively).
///
/// These headers are managed by the server itself; letting operators
/// override them via `TRUSS_RESPONSE_HEADERS` could corrupt responses.
fn reject_denied_header(name: &str) -> io::Result<()> {
    const DENIED: &[&str] = &[
        "content-length",
        "transfer-encoding",
        "content-encoding",
        "content-type",
        "connection",
        "host",
        "upgrade",
        "proxy-connection",
        "keep-alive",
        "te",
        "trailer",
    ];
    // eq_ignore_ascii_case avoids allocating a lowercased copy of `name`.
    if DENIED.iter().any(|denied| denied.eq_ignore_ascii_case(name)) {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!(
                "TRUSS_RESPONSE_HEADERS: header `{name}` is not allowed (framing/hop-by-hop header)"
            ),
        ));
    }
    Ok(())
}
1552
1553pub(super) fn validate_public_base_url(value: String) -> io::Result<String> {
1554 let parsed = Url::parse(&value).map_err(|error| {
1555 io::Error::new(
1556 io::ErrorKind::InvalidInput,
1557 format!("TRUSS_PUBLIC_BASE_URL must be a valid URL: {error}"),
1558 )
1559 })?;
1560
1561 match parsed.scheme() {
1562 "http" | "https" => Ok(parsed.to_string()),
1563 _ => Err(io::Error::new(
1564 io::ErrorKind::InvalidInput,
1565 "TRUSS_PUBLIC_BASE_URL must use http or https",
1566 )),
1567 }
1568}
1569
#[cfg(test)]
mod tests {
    use super::*;
    use serial_test::serial;

    /// RAII guard that sets or removes an environment variable for the
    /// duration of a test. On drop it restores the variable's pre-test
    /// value. (Previously the guard unconditionally removed the variable
    /// on drop, clobbering any value that was set before the test ran.)
    struct ScopedEnv {
        key: &'static str,
        // Value the variable held before the guard was created, if any.
        prev: Option<String>,
    }

    impl ScopedEnv {
        fn set(key: &'static str, value: &str) -> Self {
            let prev = env::var(key).ok();
            unsafe { env::set_var(key, value) };
            Self { key, prev }
        }

        fn remove(key: &'static str) -> Self {
            let prev = env::var(key).ok();
            unsafe { env::remove_var(key) };
            Self { key, prev }
        }
    }

    impl Drop for ScopedEnv {
        fn drop(&mut self) {
            // Restore the pre-test state instead of always removing.
            match self.prev.take() {
                Some(value) => unsafe { env::set_var(self.key, value) },
                None => unsafe { env::remove_var(self.key) },
            }
        }
    }

    #[test]
    fn keep_alive_default() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert_eq!(config.keep_alive_max_requests, 100);
    }

    #[test]
    #[serial]
    fn parse_keep_alive_env_valid() {
        let _env = ScopedEnv::set("TRUSS_KEEP_ALIVE_MAX_REQUESTS", "500");
        let result = parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000);
        assert_eq!(result.unwrap(), Some(500));
    }

    #[test]
    #[serial]
    fn parse_keep_alive_env_zero_rejected() {
        let _env = ScopedEnv::set("TRUSS_KEEP_ALIVE_MAX_REQUESTS", "0");
        let result = parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000);
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_keep_alive_env_over_max_rejected() {
        let _env = ScopedEnv::set("TRUSS_KEEP_ALIVE_MAX_REQUESTS", "100001");
        let result = parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000);
        assert!(result.is_err());
    }

    #[test]
    fn health_thresholds_default_none() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.health_cache_min_free_bytes.is_none());
        assert!(config.health_max_memory_bytes.is_none());
    }

    #[test]
    #[serial]
    fn parse_health_cache_min_free_bytes_valid() {
        let _env = ScopedEnv::set("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", "1073741824");
        let result = parse_env_u64_ranged("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", 1, u64::MAX);
        assert_eq!(result.unwrap(), Some(1_073_741_824));
    }

    #[test]
    #[serial]
    fn parse_health_max_memory_bytes_valid() {
        let _env = ScopedEnv::set("TRUSS_HEALTH_MAX_MEMORY_BYTES", "536870912");
        let result = parse_env_u64_ranged("TRUSS_HEALTH_MAX_MEMORY_BYTES", 1, u64::MAX);
        assert_eq!(result.unwrap(), Some(536_870_912));
    }

    #[test]
    #[serial]
    fn parse_health_threshold_zero_rejected() {
        let _env = ScopedEnv::set("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", "0");
        let result = parse_env_u64_ranged("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", 1, u64::MAX);
        assert!(result.is_err());
    }

    #[test]
    fn shutdown_drain_secs_default() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert_eq!(config.shutdown_drain_secs, DEFAULT_SHUTDOWN_DRAIN_SECS);
    }

    #[test]
    fn draining_default_false() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(!config.draining.load(std::sync::atomic::Ordering::Relaxed));
    }

    #[test]
    #[serial]
    fn parse_shutdown_drain_secs_valid() {
        let _env = ScopedEnv::set("TRUSS_SHUTDOWN_DRAIN_SECS", "30");
        let result = parse_env_u64_ranged("TRUSS_SHUTDOWN_DRAIN_SECS", 0, 300);
        assert_eq!(result.unwrap(), Some(30));
    }

    #[test]
    #[serial]
    fn parse_shutdown_drain_secs_over_max_rejected() {
        let _env = ScopedEnv::set("TRUSS_SHUTDOWN_DRAIN_SECS", "301");
        let result = parse_env_u64_ranged("TRUSS_SHUTDOWN_DRAIN_SECS", 0, 300);
        assert!(result.is_err());
    }

    #[test]
    fn presets_default_empty() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.presets.read().unwrap().is_empty());
        assert!(config.presets_file_path.is_none());
    }

    #[test]
    fn parse_presets_file_valid() {
        // Unique temp dir per run so parallel test invocations don't collide.
        let dir = std::env::temp_dir().join(format!(
            "truss_test_presets_{}",
            std::time::SystemTime::UNIX_EPOCH
                .elapsed()
                .unwrap()
                .as_nanos()
        ));
        std::fs::create_dir_all(&dir).unwrap();
        let path = dir.join("presets.json");
        std::fs::write(
            &path,
            r#"{"thumb":{"width":100,"height":100},"banner":{"width":1200}}"#,
        )
        .unwrap();

        let presets = super::parse_presets_file(&path).unwrap();
        assert_eq!(presets.len(), 2);
        assert_eq!(presets["thumb"].width, Some(100));
        assert_eq!(presets["thumb"].height, Some(100));
        assert_eq!(presets["banner"].width, Some(1200));

        std::fs::remove_dir_all(&dir).unwrap();
    }

    #[test]
    fn parse_presets_file_invalid_json() {
        let dir = std::env::temp_dir().join(format!(
            "truss_test_presets_invalid_{}",
            std::time::SystemTime::UNIX_EPOCH
                .elapsed()
                .unwrap()
                .as_nanos()
        ));
        std::fs::create_dir_all(&dir).unwrap();
        let path = dir.join("bad.json");
        std::fs::write(&path, "not valid json {{{").unwrap();

        let result = super::parse_presets_file(&path);
        assert!(result.is_err());

        std::fs::remove_dir_all(&dir).unwrap();
    }

    #[test]
    fn parse_presets_file_nonexistent() {
        let result =
            super::parse_presets_file(std::path::Path::new("/tmp/nonexistent_truss_test.json"));
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_presets_from_env_returns_file_path() {
        let dir = std::env::temp_dir().join(format!(
            "truss_test_presets_path_{}",
            std::time::SystemTime::UNIX_EPOCH
                .elapsed()
                .unwrap()
                .as_nanos()
        ));
        std::fs::create_dir_all(&dir).unwrap();
        let path = dir.join("presets.json");
        std::fs::write(&path, r#"{"thumb":{"width":100}}"#).unwrap();

        let _env = ScopedEnv::set("TRUSS_PRESETS_FILE", path.to_str().unwrap());
        let _env2 = ScopedEnv::remove("TRUSS_PRESETS");
        let (presets, file_path) = super::parse_presets_from_env().unwrap();

        assert_eq!(presets.len(), 1);
        assert_eq!(file_path, Some(path));

        std::fs::remove_dir_all(&dir).unwrap();
    }

    #[test]
    fn with_presets_sets_presets() {
        let mut map = HashMap::new();
        map.insert(
            "test".to_string(),
            super::super::TransformOptionsPayload {
                width: Some(200),
                height: None,
                fit: None,
                position: None,
                format: None,
                quality: None,
                background: None,
                rotate: None,
                auto_orient: None,
                strip_metadata: None,
                preserve_exif: None,
                crop: None,
                blur: None,
                sharpen: None,
            },
        );
        let config = ServerConfig::new(PathBuf::from("."), None).with_presets(map);
        let presets = config.presets.read().unwrap();
        assert_eq!(presets.len(), 1);
        assert_eq!(presets["test"].width, Some(200));
    }

    #[test]
    fn custom_response_headers_default_empty() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.custom_response_headers.is_empty());
    }

    #[test]
    #[serial]
    fn parse_response_headers_valid_json() {
        let _env = ScopedEnv::set(
            "TRUSS_RESPONSE_HEADERS",
            r#"{"CDN-Cache-Control":"max-age=3600","X-Custom":"value"}"#,
        );
        let result = parse_response_headers_from_env();
        let headers = result.unwrap();
        assert_eq!(headers.len(), 2);
        // Output is sorted by header name.
        assert_eq!(headers[0].0, "CDN-Cache-Control");
        assert_eq!(headers[0].1, "max-age=3600");
        assert_eq!(headers[1].0, "X-Custom");
        assert_eq!(headers[1].1, "value");
    }

    #[test]
    #[serial]
    fn parse_response_headers_invalid_json() {
        let _env = ScopedEnv::set("TRUSS_RESPONSE_HEADERS", "not json");
        let result = parse_response_headers_from_env();
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_response_headers_empty_name_rejected() {
        let _env = ScopedEnv::set("TRUSS_RESPONSE_HEADERS", r#"{"":"value"}"#);
        let result = parse_response_headers_from_env();
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_response_headers_invalid_name_character() {
        let _env = ScopedEnv::set("TRUSS_RESPONSE_HEADERS", r#"{"Bad Header":"value"}"#);
        let result = parse_response_headers_from_env();
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_response_headers_invalid_value_character() {
        let _env = ScopedEnv::set("TRUSS_RESPONSE_HEADERS", r#"{"X-Bad":"val\u0000ue"}"#);
        let result = parse_response_headers_from_env();
        assert!(result.is_err());
    }

    #[test]
    fn validate_header_name_valid() {
        assert!(super::validate_header_name("Cache-Control").is_ok());
        assert!(super::validate_header_name("X-Custom-Header").is_ok());
        assert!(super::validate_header_name("CDN-Cache-Control").is_ok());
    }

    #[test]
    fn validate_header_name_rejects_space() {
        assert!(super::validate_header_name("Bad Header").is_err());
    }

    #[test]
    fn validate_header_name_rejects_empty() {
        assert!(super::validate_header_name("").is_err());
    }

    #[test]
    fn validate_header_value_valid() {
        assert!(super::validate_header_value("X", "normal value").is_ok());
        assert!(super::validate_header_value("X", "max-age=3600, public").is_ok());
    }

    #[test]
    fn validate_header_value_rejects_null() {
        assert!(super::validate_header_value("X", "bad\x00value").is_err());
    }

    #[test]
    fn compression_enabled_by_default() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.enable_compression);
    }

    #[test]
    fn log_level_default_info() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert_eq!(config.current_log_level(), LogLevel::Info);
    }

    #[test]
    fn log_level_cycle() {
        assert_eq!(LogLevel::Info.cycle(), LogLevel::Debug);
        assert_eq!(LogLevel::Debug.cycle(), LogLevel::Error);
        assert_eq!(LogLevel::Error.cycle(), LogLevel::Warn);
        assert_eq!(LogLevel::Warn.cycle(), LogLevel::Info);
    }

    #[test]
    fn log_level_from_str() {
        assert_eq!("error".parse::<LogLevel>().unwrap(), LogLevel::Error);
        assert_eq!("WARN".parse::<LogLevel>().unwrap(), LogLevel::Warn);
        assert_eq!("Info".parse::<LogLevel>().unwrap(), LogLevel::Info);
        assert_eq!("DEBUG".parse::<LogLevel>().unwrap(), LogLevel::Debug);
        assert!("invalid".parse::<LogLevel>().is_err());
    }

    #[test]
    fn log_level_display() {
        assert_eq!(LogLevel::Error.to_string(), "error");
        assert_eq!(LogLevel::Warn.to_string(), "warn");
        assert_eq!(LogLevel::Info.to_string(), "info");
        assert_eq!(LogLevel::Debug.to_string(), "debug");
    }

    #[test]
    fn log_level_from_u8_roundtrip() {
        for level in [
            LogLevel::Error,
            LogLevel::Warn,
            LogLevel::Info,
            LogLevel::Debug,
        ] {
            assert_eq!(LogLevel::from_u8(level as u8), level);
        }
        // Out-of-range values fall back to Info.
        assert_eq!(LogLevel::from_u8(42), LogLevel::Info);
    }

    #[test]
    #[serial]
    fn parse_log_level_from_env() {
        let _env = ScopedEnv::set("TRUSS_LOG_LEVEL", "debug");
        let config = ServerConfig::from_env().unwrap();
        assert_eq!(config.current_log_level(), LogLevel::Debug);
    }

    #[test]
    #[serial]
    fn parse_log_level_invalid_rejected() {
        let _env = ScopedEnv::set("TRUSS_LOG_LEVEL", "verbose");
        let result = ServerConfig::from_env();
        assert!(result.is_err());
    }

    #[test]
    fn log_at_filters_by_level() {
        use std::sync::Mutex;

        let messages: Arc<Mutex<Vec<String>>> = Arc::new(Mutex::new(Vec::new()));
        let msgs = Arc::clone(&messages);
        let handler: LogHandler = Arc::new(move |msg: &str| {
            msgs.lock().unwrap().push(msg.to_string());
        });

        let mut config = ServerConfig::new(PathBuf::from("."), None);
        config.log_handler = Some(handler);
        config
            .log_level
            .store(LogLevel::Warn as u8, std::sync::atomic::Ordering::Relaxed);

        config.log_at(LogLevel::Error, "err");
        config.log_at(LogLevel::Warn, "wrn");
        config.log_at(LogLevel::Info, "inf");
        config.log_at(LogLevel::Debug, "dbg");

        let logged = messages.lock().unwrap();
        assert_eq!(*logged, vec!["err", "wrn"]);
    }

    #[test]
    #[serial]
    fn parse_format_preference_unset_returns_empty() {
        let _env = ScopedEnv::remove("TRUSS_FORMAT_PREFERENCE");
        let result = parse_format_preference_from_env().unwrap();
        assert!(result.is_empty());
    }

    #[test]
    #[serial]
    fn parse_format_preference_empty_returns_empty() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", "");
        let result = parse_format_preference_from_env().unwrap();
        assert!(result.is_empty());
    }

    #[test]
    #[serial]
    fn parse_format_preference_single_format() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", "webp");
        let result = parse_format_preference_from_env().unwrap();
        assert_eq!(result, vec![crate::MediaType::Webp]);
    }

    #[test]
    #[serial]
    fn parse_format_preference_multiple_formats() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", "avif,webp,png,jpeg");
        let result = parse_format_preference_from_env().unwrap();
        assert_eq!(
            result,
            vec![
                crate::MediaType::Avif,
                crate::MediaType::Webp,
                crate::MediaType::Png,
                crate::MediaType::Jpeg,
            ]
        );
    }

    #[test]
    #[serial]
    fn parse_format_preference_with_spaces() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", " webp , jpeg , png ");
        let result = parse_format_preference_from_env().unwrap();
        assert_eq!(
            result,
            vec![
                crate::MediaType::Webp,
                crate::MediaType::Jpeg,
                crate::MediaType::Png,
            ]
        );
    }

    #[test]
    #[serial]
    fn parse_format_preference_invalid_format_rejected() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", "webp,gif");
        let result = parse_format_preference_from_env();
        assert!(result.is_err());
        let msg = result.unwrap_err().to_string();
        assert!(msg.contains("TRUSS_FORMAT_PREFERENCE"));
    }

    #[test]
    #[serial]
    fn parse_format_preference_duplicate_rejected() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", "webp,jpeg,webp");
        let result = parse_format_preference_from_env();
        assert!(result.is_err());
        let msg = result.unwrap_err().to_string();
        assert!(msg.contains("duplicate"));
    }

    #[test]
    #[serial]
    fn parse_format_preference_trailing_comma_ok() {
        let _env = ScopedEnv::set("TRUSS_FORMAT_PREFERENCE", "avif,webp,");
        let result = parse_format_preference_from_env().unwrap();
        assert_eq!(result, vec![crate::MediaType::Avif, crate::MediaType::Webp]);
    }
}