1use super::TransformOptionsPayload;
2#[cfg(feature = "azure")]
3use super::azure;
4#[cfg(feature = "gcs")]
5use super::gcs;
/// Default cap on transforms executing at once (overridable via
/// `TRUSS_MAX_CONCURRENT_TRANSFORMS` in `ServerConfig::from_env`).
pub(super) const DEFAULT_MAX_CONCURRENT_TRANSFORMS: u64 = 64;
8#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
9use super::remote::STORAGE_DOWNLOAD_TIMEOUT_SECS;
10#[cfg(feature = "s3")]
11use super::s3;
12use super::stderr_write;
13
14use std::collections::HashMap;
15use std::env;
16use std::fmt;
17use std::io;
18use std::path::PathBuf;
19use std::sync::Arc;
20use std::sync::atomic::{AtomicBool, AtomicU64};
21use url::Url;
22
/// Feature-independent mirror of [`StorageBackend`].
///
/// All four variants exist in every build (hence `allow(dead_code)`), so code
/// that only needs a backend *name* — presumably for logging/metrics, usage is
/// outside this file — does not have to repeat the feature gates.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
pub(super) enum StorageBackendLabel {
    Filesystem,
    S3,
    Gcs,
    Azure,
}
33
/// The storage implementation the server reads from and writes to.
///
/// Only compiled when at least one remote-storage feature is enabled; each
/// remote variant exists only in builds carrying its feature.
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StorageBackend {
    Filesystem,
    #[cfg(feature = "s3")]
    S3,
    #[cfg(feature = "gcs")]
    Gcs,
    #[cfg(feature = "azure")]
    Azure,
}
51
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
impl StorageBackend {
    /// Parses a backend name (case-insensitive) into a [`StorageBackend`].
    ///
    /// Accepts `filesystem` (aliases `fs`, `local`) plus whichever of `s3`,
    /// `gcs`, `azure` were compiled in. On failure the error message lists the
    /// values valid for *this* build and, when the rejected value names a
    /// known backend that was compiled out, appends a rebuild hint.
    pub fn parse(value: &str) -> Result<Self, String> {
        match value.to_ascii_lowercase().as_str() {
            "filesystem" | "fs" | "local" => Ok(Self::Filesystem),
            #[cfg(feature = "s3")]
            "s3" => Ok(Self::S3),
            #[cfg(feature = "gcs")]
            "gcs" => Ok(Self::Gcs),
            #[cfg(feature = "azure")]
            "azure" => Ok(Self::Azure),
            _ => {
                // Names accepted by this particular feature set.
                let mut expected = vec!["filesystem"];
                #[cfg(feature = "s3")]
                expected.push("s3");
                #[cfg(feature = "gcs")]
                expected.push("gcs");
                #[cfg(feature = "azure")]
                expected.push("azure");

                // `unused_mut`: when every feature is enabled none of the
                // cfg(not(...)) branches below survive, so `hint` stays unset.
                #[allow(unused_mut)]
                let mut hint = String::new();
                #[cfg(not(feature = "s3"))]
                if value.eq_ignore_ascii_case("s3") {
                    hint = " (hint: rebuild with --features s3)".to_string();
                }
                #[cfg(not(feature = "gcs"))]
                if value.eq_ignore_ascii_case("gcs") {
                    hint = " (hint: rebuild with --features gcs)".to_string();
                }
                #[cfg(not(feature = "azure"))]
                if value.eq_ignore_ascii_case("azure") {
                    hint = " (hint: rebuild with --features azure)".to_string();
                }

                Err(format!(
                    "unknown storage backend `{value}` (expected {}){hint}",
                    expected.join(" or ")
                ))
            }
        }
    }
}
96
/// Default listen address when none is configured.
pub const DEFAULT_BIND_ADDR: &str = "127.0.0.1:8080";

/// Default filesystem storage root: the current working directory.
pub const DEFAULT_STORAGE_ROOT: &str = ".";

/// Default `Cache-Control: max-age` for public responses, in seconds.
pub(super) const DEFAULT_PUBLIC_MAX_AGE_SECONDS: u32 = 3600;
/// Default `stale-while-revalidate` window for public responses, in seconds.
pub(super) const DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS: u32 = 60;

/// Default grace period for draining in-flight requests at shutdown, seconds.
pub(super) const DEFAULT_SHUTDOWN_DRAIN_SECS: u64 = 10;

/// Default wall-clock deadline for a single transform, in seconds.
pub(super) const DEFAULT_TRANSFORM_DEADLINE_SECS: u64 = 30;

/// Default cap on decoded input size (total pixel count).
pub(super) const DEFAULT_MAX_INPUT_PIXELS: u64 = 40_000_000;

/// Default number of requests served on one keep-alive connection.
pub(super) const DEFAULT_KEEP_ALIVE_MAX_REQUESTS: u64 = 100;

use super::http_parse::DEFAULT_MAX_UPLOAD_BODY_BYTES;

/// Pluggable log sink; `ServerConfig::log` falls back to stderr when unset.
pub type LogHandler = Arc<dyn Fn(&str) + Send + Sync>;
136
/// Runtime configuration for the server.
///
/// Construct via [`ServerConfig::new`] (compiled-in defaults),
/// [`ServerConfig::from_env`] (`TRUSS_*` environment variables), and the
/// `with_*` builder methods.
pub struct ServerConfig {
    /// Root directory for filesystem-backed storage.
    pub storage_root: PathBuf,
    /// Optional bearer token (`TRUSS_BEARER_TOKEN`); enforcement happens
    /// outside this module — presumably on authenticated endpoints.
    pub bearer_token: Option<String>,
    /// Canonical external origin (`TRUSS_PUBLIC_BASE_URL`), validated to be
    /// http(s); used so signed URLs survive proxies/CDNs (see `from_env`).
    pub public_base_url: Option<String>,
    /// Key ID used when signing new URLs; set together with
    /// `signed_url_secret` (enforced in `from_env`).
    pub signed_url_key_id: Option<String>,
    /// Secret paired with `signed_url_key_id`; redacted from `Debug` output.
    pub signed_url_secret: Option<String>,
    /// All accepted signing keys (key ID -> secret), including the primary
    /// pair and any extras from `TRUSS_SIGNING_KEYS`.
    pub signing_keys: HashMap<String, String>,
    /// When true, insecure (non-HTTPS) URL sources are permitted.
    pub allow_insecure_url_sources: bool,
    /// Optional on-disk cache directory (`TRUSS_CACHE_ROOT`).
    pub cache_root: Option<PathBuf>,
    /// `Cache-Control: max-age` for public responses, seconds.
    pub public_max_age_seconds: u32,
    /// `stale-while-revalidate` window for public responses, seconds.
    pub public_stale_while_revalidate_seconds: u32,
    /// Disables `Accept`-header content negotiation when true.
    pub disable_accept_negotiation: bool,
    /// Optional log sink; `log()` writes to stderr when unset.
    pub log_handler: Option<LogHandler>,
    /// Cap on transforms executing concurrently.
    pub max_concurrent_transforms: u64,
    /// Per-transform deadline, seconds.
    pub transform_deadline_secs: u64,
    /// Maximum decoded input size, in pixels.
    pub max_input_pixels: u64,
    /// Maximum accepted upload body size, in bytes.
    pub max_upload_bytes: usize,
    /// Requests served per keep-alive connection before closing.
    pub keep_alive_max_requests: u64,
    /// Optional token guarding the metrics endpoint; redacted in `Debug`.
    pub metrics_token: Option<String>,
    /// Disables metrics entirely when true.
    pub disable_metrics: bool,
    /// Minimum free bytes under `cache_root` — presumably a health-check
    /// threshold (TRUSS_HEALTH_CACHE_MIN_FREE_BYTES); confirm against caller.
    pub health_cache_min_free_bytes: Option<u64>,
    /// Maximum process memory — presumably a health-check threshold
    /// (TRUSS_HEALTH_MAX_MEMORY_BYTES); confirm against caller.
    pub health_max_memory_bytes: Option<u64>,
    /// Grace period for draining in-flight requests at shutdown, seconds.
    pub shutdown_drain_secs: u64,
    /// Shared drain flag; `Clone` shares the same allocation, so every copy
    /// of the config observes the same state.
    pub draining: Arc<AtomicBool>,
    /// Extra validated response headers, sorted by name (see
    /// `parse_response_headers_from_env`).
    pub custom_response_headers: Vec<(String, String)>,
    /// Enables response compression (on by default).
    pub enable_compression: bool,
    /// Compression level; `from_env` accepts 0-9 and defaults to 1.
    pub compression_level: u32,
    /// Shared live count of in-flight transforms; shared across clones.
    pub transforms_in_flight: Arc<AtomicU64>,
    /// Named transform presets keyed by preset name.
    pub presets: HashMap<String, TransformOptionsPayload>,
    /// Timeout for remote-storage operations, seconds.
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    pub storage_timeout_secs: u64,
    /// Which storage backend is active.
    #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
    pub storage_backend: StorageBackend,
    /// S3 context, populated when the S3 backend is selected.
    #[cfg(feature = "s3")]
    pub s3_context: Option<Arc<s3::S3Context>>,
    /// GCS context, populated when the GCS backend is selected.
    #[cfg(feature = "gcs")]
    pub gcs_context: Option<Arc<gcs::GcsContext>>,
    /// Azure context, populated when the Azure backend is selected.
    #[cfg(feature = "azure")]
    pub azure_context: Option<Arc<azure::AzureContext>>,
}
306
// Hand-written clone. The explicit `Arc::clone`s make it visible that
// `draining` and `transforms_in_flight` are *shared* with the clone (refcount
// bump, same allocation), so all copies observe the same runtime state.
// NOTE(review): a derived `Clone` would behave identically if every field
// type is `Clone` — confirm before simplifying.
impl Clone for ServerConfig {
    fn clone(&self) -> Self {
        Self {
            storage_root: self.storage_root.clone(),
            bearer_token: self.bearer_token.clone(),
            public_base_url: self.public_base_url.clone(),
            signed_url_key_id: self.signed_url_key_id.clone(),
            signed_url_secret: self.signed_url_secret.clone(),
            signing_keys: self.signing_keys.clone(),
            allow_insecure_url_sources: self.allow_insecure_url_sources,
            cache_root: self.cache_root.clone(),
            public_max_age_seconds: self.public_max_age_seconds,
            public_stale_while_revalidate_seconds: self.public_stale_while_revalidate_seconds,
            disable_accept_negotiation: self.disable_accept_negotiation,
            log_handler: self.log_handler.clone(),
            max_concurrent_transforms: self.max_concurrent_transforms,
            transform_deadline_secs: self.transform_deadline_secs,
            max_input_pixels: self.max_input_pixels,
            max_upload_bytes: self.max_upload_bytes,
            keep_alive_max_requests: self.keep_alive_max_requests,
            metrics_token: self.metrics_token.clone(),
            disable_metrics: self.disable_metrics,
            health_cache_min_free_bytes: self.health_cache_min_free_bytes,
            health_max_memory_bytes: self.health_max_memory_bytes,
            shutdown_drain_secs: self.shutdown_drain_secs,
            draining: Arc::clone(&self.draining),
            custom_response_headers: self.custom_response_headers.clone(),
            enable_compression: self.enable_compression,
            compression_level: self.compression_level,
            transforms_in_flight: Arc::clone(&self.transforms_in_flight),
            presets: self.presets.clone(),
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_timeout_secs: self.storage_timeout_secs,
            #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
            storage_backend: self.storage_backend,
            #[cfg(feature = "s3")]
            s3_context: self.s3_context.clone(),
            #[cfg(feature = "gcs")]
            gcs_context: self.gcs_context.clone(),
            #[cfg(feature = "azure")]
            azure_context: self.azure_context.clone(),
        }
    }
}
351
// Manual `Debug` so secrets never leak into logs: the bearer token, signed-URL
// secret, and metrics token print as "[REDACTED]"; signing keys print only
// their key IDs; the log handler and storage contexts are elided as "..";
// `custom_response_headers` shows only a count (values could carry secrets).
// Runtime-state fields (`draining`, `transforms_in_flight`) are omitted.
impl fmt::Debug for ServerConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("ServerConfig");
        d.field("storage_root", &self.storage_root)
            .field(
                "bearer_token",
                &self.bearer_token.as_ref().map(|_| "[REDACTED]"),
            )
            .field("public_base_url", &self.public_base_url)
            .field("signed_url_key_id", &self.signed_url_key_id)
            .field(
                "signed_url_secret",
                &self.signed_url_secret.as_ref().map(|_| "[REDACTED]"),
            )
            .field(
                "signing_keys",
                &self.signing_keys.keys().collect::<Vec<_>>(),
            )
            .field(
                "allow_insecure_url_sources",
                &self.allow_insecure_url_sources,
            )
            .field("cache_root", &self.cache_root)
            .field("public_max_age_seconds", &self.public_max_age_seconds)
            .field(
                "public_stale_while_revalidate_seconds",
                &self.public_stale_while_revalidate_seconds,
            )
            .field(
                "disable_accept_negotiation",
                &self.disable_accept_negotiation,
            )
            .field("log_handler", &self.log_handler.as_ref().map(|_| ".."))
            .field("max_concurrent_transforms", &self.max_concurrent_transforms)
            .field("transform_deadline_secs", &self.transform_deadline_secs)
            .field("max_input_pixels", &self.max_input_pixels)
            .field("max_upload_bytes", &self.max_upload_bytes)
            .field("keep_alive_max_requests", &self.keep_alive_max_requests)
            .field(
                "metrics_token",
                &self.metrics_token.as_ref().map(|_| "[REDACTED]"),
            )
            .field("disable_metrics", &self.disable_metrics)
            .field(
                "health_cache_min_free_bytes",
                &self.health_cache_min_free_bytes,
            )
            .field("health_max_memory_bytes", &self.health_max_memory_bytes)
            .field("shutdown_drain_secs", &self.shutdown_drain_secs)
            .field(
                "custom_response_headers",
                &self.custom_response_headers.len(),
            )
            .field("enable_compression", &self.enable_compression)
            .field("compression_level", &self.compression_level)
            .field("presets", &self.presets.keys().collect::<Vec<_>>());
        #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
        {
            d.field("storage_backend", &self.storage_backend);
        }
        #[cfg(feature = "s3")]
        {
            d.field("s3_context", &self.s3_context.as_ref().map(|_| ".."));
        }
        #[cfg(feature = "gcs")]
        {
            d.field("gcs_context", &self.gcs_context.as_ref().map(|_| ".."));
        }
        #[cfg(feature = "azure")]
        {
            d.field("azure_context", &self.azure_context.as_ref().map(|_| ".."));
        }
        d.finish()
    }
}
427
// Structural equality over the *configuration* fields only. Deliberately not
// compared: `log_handler` (closures are not comparable), and the shared
// runtime-state handles `draining` and `transforms_in_flight`. The
// feature-gated storage settings are delegated to `cfg_storage_eq`.
impl PartialEq for ServerConfig {
    fn eq(&self, other: &Self) -> bool {
        self.storage_root == other.storage_root
            && self.bearer_token == other.bearer_token
            && self.public_base_url == other.public_base_url
            && self.signed_url_key_id == other.signed_url_key_id
            && self.signed_url_secret == other.signed_url_secret
            && self.signing_keys == other.signing_keys
            && self.allow_insecure_url_sources == other.allow_insecure_url_sources
            && self.cache_root == other.cache_root
            && self.public_max_age_seconds == other.public_max_age_seconds
            && self.public_stale_while_revalidate_seconds
                == other.public_stale_while_revalidate_seconds
            && self.disable_accept_negotiation == other.disable_accept_negotiation
            && self.max_concurrent_transforms == other.max_concurrent_transforms
            && self.transform_deadline_secs == other.transform_deadline_secs
            && self.max_input_pixels == other.max_input_pixels
            && self.max_upload_bytes == other.max_upload_bytes
            && self.keep_alive_max_requests == other.keep_alive_max_requests
            && self.metrics_token == other.metrics_token
            && self.disable_metrics == other.disable_metrics
            && self.health_cache_min_free_bytes == other.health_cache_min_free_bytes
            && self.health_max_memory_bytes == other.health_max_memory_bytes
            && self.shutdown_drain_secs == other.shutdown_drain_secs
            && self.custom_response_headers == other.custom_response_headers
            && self.enable_compression == other.enable_compression
            && self.compression_level == other.compression_level
            && self.presets == other.presets
            && cfg_storage_eq(self, other)
    }
}
459
460fn cfg_storage_eq(_this: &ServerConfig, _other: &ServerConfig) -> bool {
461 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
462 {
463 if _this.storage_backend != _other.storage_backend {
464 return false;
465 }
466 }
467 #[cfg(feature = "s3")]
468 {
469 if _this
470 .s3_context
471 .as_ref()
472 .map(|c| (&c.default_bucket, &c.endpoint_url))
473 != _other
474 .s3_context
475 .as_ref()
476 .map(|c| (&c.default_bucket, &c.endpoint_url))
477 {
478 return false;
479 }
480 }
481 #[cfg(feature = "gcs")]
482 {
483 if _this
484 .gcs_context
485 .as_ref()
486 .map(|c| (&c.default_bucket, &c.endpoint_url))
487 != _other
488 .gcs_context
489 .as_ref()
490 .map(|c| (&c.default_bucket, &c.endpoint_url))
491 {
492 return false;
493 }
494 }
495 #[cfg(feature = "azure")]
496 {
497 if _this
498 .azure_context
499 .as_ref()
500 .map(|c| (&c.default_container, &c.endpoint_url))
501 != _other
502 .azure_context
503 .as_ref()
504 .map(|c| (&c.default_container, &c.endpoint_url))
505 {
506 return false;
507 }
508 }
509 true
510}
511
// Marker impl: the compared field set contains no floats, so equality is a
// total equivalence relation. NOTE(review): assumes `TransformOptionsPayload`
// equality is likewise total — confirm in its definition.
impl Eq for ServerConfig {}
513
514impl ServerConfig {
/// Creates a configuration with the given storage root and optional bearer
/// token; every other field starts at its compiled-in default: no signing
/// keys, no cache, compression enabled at level 1, filesystem backend, and
/// fresh (un-shared) `draining`/`transforms_in_flight` handles.
pub fn new(storage_root: PathBuf, bearer_token: Option<String>) -> Self {
    Self {
        storage_root,
        bearer_token,
        public_base_url: None,
        signed_url_key_id: None,
        signed_url_secret: None,
        signing_keys: HashMap::new(),
        allow_insecure_url_sources: false,
        cache_root: None,
        public_max_age_seconds: DEFAULT_PUBLIC_MAX_AGE_SECONDS,
        public_stale_while_revalidate_seconds: DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS,
        disable_accept_negotiation: false,
        log_handler: None,
        max_concurrent_transforms: DEFAULT_MAX_CONCURRENT_TRANSFORMS,
        transform_deadline_secs: DEFAULT_TRANSFORM_DEADLINE_SECS,
        max_input_pixels: DEFAULT_MAX_INPUT_PIXELS,
        max_upload_bytes: DEFAULT_MAX_UPLOAD_BODY_BYTES,
        keep_alive_max_requests: DEFAULT_KEEP_ALIVE_MAX_REQUESTS,
        metrics_token: None,
        disable_metrics: false,
        health_cache_min_free_bytes: None,
        health_max_memory_bytes: None,
        shutdown_drain_secs: DEFAULT_SHUTDOWN_DRAIN_SECS,
        draining: Arc::new(AtomicBool::new(false)),
        custom_response_headers: Vec::new(),
        enable_compression: true,
        compression_level: 1,
        transforms_in_flight: Arc::new(AtomicU64::new(0)),
        presets: HashMap::new(),
        #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
        storage_timeout_secs: STORAGE_DOWNLOAD_TIMEOUT_SECS,
        #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
        storage_backend: StorageBackend::Filesystem,
        #[cfg(feature = "s3")]
        s3_context: None,
        #[cfg(feature = "gcs")]
        gcs_context: None,
        #[cfg(feature = "azure")]
        azure_context: None,
    }
}
571
/// Maps the active [`StorageBackend`] to its feature-independent label.
///
/// Cleanup: the function itself is gated on
/// `any(feature = "s3", "gcs", "azure")`, so the inner `#[cfg(any(...))]`
/// wrapper the original carried was redundant and its
/// `#[cfg(not(any(...)))]` filesystem-only fallback was unreachable in every
/// build where the function exists. Both are removed; behavior is unchanged
/// for all feature combinations.
#[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
pub(super) fn storage_backend_label(&self) -> StorageBackendLabel {
    match self.storage_backend {
        StorageBackend::Filesystem => StorageBackendLabel::Filesystem,
        #[cfg(feature = "s3")]
        StorageBackend::S3 => StorageBackendLabel::S3,
        #[cfg(feature = "gcs")]
        StorageBackend::Gcs => StorageBackendLabel::Gcs,
        #[cfg(feature = "azure")]
        StorageBackend::Azure => StorageBackendLabel::Azure,
    }
}
591
592 pub(super) fn log(&self, msg: &str) {
595 if let Some(handler) = &self.log_handler {
596 handler(msg);
597 } else {
598 stderr_write(msg);
599 }
600 }
601
602 pub fn with_signed_url_credentials(
620 mut self,
621 key_id: impl Into<String>,
622 secret: impl Into<String>,
623 ) -> Self {
624 let key_id = key_id.into();
625 let secret = secret.into();
626 self.signing_keys.insert(key_id.clone(), secret.clone());
627 self.signed_url_key_id = Some(key_id);
628 self.signed_url_secret = Some(secret);
629 self
630 }
631
632 pub fn with_signing_keys(mut self, keys: HashMap<String, String>) -> Self {
638 self.signing_keys.extend(keys);
639 self
640 }
641
642 pub fn with_insecure_url_sources(mut self, allow_insecure_url_sources: bool) -> Self {
659 self.allow_insecure_url_sources = allow_insecure_url_sources;
660 self
661 }
662
663 pub fn with_cache_root(mut self, cache_root: impl Into<PathBuf>) -> Self {
679 self.cache_root = Some(cache_root.into());
680 self
681 }
682
/// Selects the S3 backend and installs its context.
#[cfg(feature = "s3")]
pub fn with_s3_context(self, context: s3::S3Context) -> Self {
    Self {
        storage_backend: StorageBackend::S3,
        s3_context: Some(Arc::new(context)),
        ..self
    }
}
690
/// Selects the GCS backend and installs its context.
#[cfg(feature = "gcs")]
pub fn with_gcs_context(self, context: gcs::GcsContext) -> Self {
    Self {
        storage_backend: StorageBackend::Gcs,
        gcs_context: Some(Arc::new(context)),
        ..self
    }
}
698
/// Selects the Azure backend and installs its context.
#[cfg(feature = "azure")]
pub fn with_azure_context(self, context: azure::AzureContext) -> Self {
    Self {
        storage_backend: StorageBackend::Azure,
        azure_context: Some(Arc::new(context)),
        ..self
    }
}
706
707 pub fn with_presets(mut self, presets: HashMap<String, TransformOptionsPayload>) -> Self {
709 self.presets = presets;
710 self
711 }
712
713 pub fn from_env() -> io::Result<Self> {
797 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
798 let storage_backend = match env::var("TRUSS_STORAGE_BACKEND")
799 .ok()
800 .filter(|v| !v.is_empty())
801 {
802 Some(value) => StorageBackend::parse(&value)
803 .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?,
804 None => StorageBackend::Filesystem,
805 };
806
807 let storage_root =
808 env::var("TRUSS_STORAGE_ROOT").unwrap_or_else(|_| DEFAULT_STORAGE_ROOT.to_string());
809 let storage_root = PathBuf::from(storage_root).canonicalize()?;
810 let bearer_token = env::var("TRUSS_BEARER_TOKEN")
811 .ok()
812 .filter(|value| !value.is_empty());
813 let public_base_url = env::var("TRUSS_PUBLIC_BASE_URL")
814 .ok()
815 .filter(|value| !value.is_empty())
816 .map(validate_public_base_url)
817 .transpose()?;
818 let signed_url_key_id = env::var("TRUSS_SIGNED_URL_KEY_ID")
819 .ok()
820 .filter(|value| !value.is_empty());
821 let signed_url_secret = env::var("TRUSS_SIGNED_URL_SECRET")
822 .ok()
823 .filter(|value| !value.is_empty());
824
825 if signed_url_key_id.is_some() != signed_url_secret.is_some() {
826 return Err(io::Error::new(
827 io::ErrorKind::InvalidInput,
828 "TRUSS_SIGNED_URL_KEY_ID and TRUSS_SIGNED_URL_SECRET must be set together",
829 ));
830 }
831
832 let mut signing_keys = HashMap::new();
833 if let (Some(kid), Some(sec)) = (&signed_url_key_id, &signed_url_secret) {
834 signing_keys.insert(kid.clone(), sec.clone());
835 }
836 if let Ok(json) = env::var("TRUSS_SIGNING_KEYS")
837 && !json.is_empty()
838 {
839 let extra: HashMap<String, String> = serde_json::from_str(&json).map_err(|e| {
840 io::Error::new(
841 io::ErrorKind::InvalidInput,
842 format!("TRUSS_SIGNING_KEYS must be valid JSON: {e}"),
843 )
844 })?;
845 for (kid, sec) in &extra {
846 if kid.is_empty() || sec.is_empty() {
847 return Err(io::Error::new(
848 io::ErrorKind::InvalidInput,
849 "TRUSS_SIGNING_KEYS must not contain empty key IDs or secrets",
850 ));
851 }
852 }
853 signing_keys.extend(extra);
854 }
855
856 if !signing_keys.is_empty() && public_base_url.is_none() {
857 eprintln!(
858 "truss: warning: signing keys are configured but TRUSS_PUBLIC_BASE_URL is not. \
859 Behind a reverse proxy or CDN the Host header may differ from the externally \
860 visible authority, causing signed URL verification to fail. Consider setting \
861 TRUSS_PUBLIC_BASE_URL to the canonical external origin."
862 );
863 }
864
865 let cache_root = env::var("TRUSS_CACHE_ROOT")
866 .ok()
867 .filter(|value| !value.is_empty())
868 .map(PathBuf::from);
869
870 let public_max_age_seconds = parse_optional_env_u32("TRUSS_PUBLIC_MAX_AGE")?
871 .unwrap_or(DEFAULT_PUBLIC_MAX_AGE_SECONDS);
872 let public_stale_while_revalidate_seconds =
873 parse_optional_env_u32("TRUSS_PUBLIC_STALE_WHILE_REVALIDATE")?
874 .unwrap_or(DEFAULT_PUBLIC_STALE_WHILE_REVALIDATE_SECONDS);
875
876 let allow_insecure_url_sources = env_flag("TRUSS_ALLOW_INSECURE_URL_SOURCES");
877
878 let max_concurrent_transforms =
879 parse_env_u64_ranged("TRUSS_MAX_CONCURRENT_TRANSFORMS", 1, 1024)?
880 .unwrap_or(DEFAULT_MAX_CONCURRENT_TRANSFORMS);
881
882 let transform_deadline_secs =
883 parse_env_u64_ranged("TRUSS_TRANSFORM_DEADLINE_SECS", 1, 300)?
884 .unwrap_or(DEFAULT_TRANSFORM_DEADLINE_SECS);
885
886 let max_input_pixels =
887 parse_env_u64_ranged("TRUSS_MAX_INPUT_PIXELS", 1, crate::MAX_DECODED_PIXELS)?
888 .unwrap_or(DEFAULT_MAX_INPUT_PIXELS);
889
890 let max_upload_bytes =
891 parse_env_u64_ranged("TRUSS_MAX_UPLOAD_BYTES", 1, 10 * 1024 * 1024 * 1024)?
892 .unwrap_or(DEFAULT_MAX_UPLOAD_BODY_BYTES as u64) as usize;
893
894 let keep_alive_max_requests =
895 parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000)?
896 .unwrap_or(DEFAULT_KEEP_ALIVE_MAX_REQUESTS);
897
898 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
899 let storage_timeout_secs = parse_env_u64_ranged("TRUSS_STORAGE_TIMEOUT_SECS", 1, 300)?
900 .unwrap_or(STORAGE_DOWNLOAD_TIMEOUT_SECS);
901
902 #[cfg(feature = "s3")]
903 let s3_context = if storage_backend == StorageBackend::S3 {
904 let bucket = env::var("TRUSS_S3_BUCKET")
905 .ok()
906 .filter(|v| !v.is_empty())
907 .ok_or_else(|| {
908 io::Error::new(
909 io::ErrorKind::InvalidInput,
910 "TRUSS_S3_BUCKET is required when TRUSS_STORAGE_BACKEND=s3",
911 )
912 })?;
913 Some(Arc::new(s3::build_s3_context(
914 bucket,
915 allow_insecure_url_sources,
916 )?))
917 } else {
918 None
919 };
920
921 #[cfg(feature = "gcs")]
922 let gcs_context = if storage_backend == StorageBackend::Gcs {
923 let bucket = env::var("TRUSS_GCS_BUCKET")
924 .ok()
925 .filter(|v| !v.is_empty())
926 .ok_or_else(|| {
927 io::Error::new(
928 io::ErrorKind::InvalidInput,
929 "TRUSS_GCS_BUCKET is required when TRUSS_STORAGE_BACKEND=gcs",
930 )
931 })?;
932 Some(Arc::new(gcs::build_gcs_context(
933 bucket,
934 allow_insecure_url_sources,
935 )?))
936 } else {
937 if env::var("TRUSS_GCS_BUCKET")
938 .ok()
939 .filter(|v| !v.is_empty())
940 .is_some()
941 {
942 eprintln!(
943 "truss: warning: TRUSS_GCS_BUCKET is set but TRUSS_STORAGE_BACKEND is not \
944 `gcs`. The GCS bucket will be ignored. Set TRUSS_STORAGE_BACKEND=gcs to \
945 enable the GCS backend."
946 );
947 }
948 None
949 };
950
951 #[cfg(feature = "azure")]
952 let azure_context = if storage_backend == StorageBackend::Azure {
953 let container = env::var("TRUSS_AZURE_CONTAINER")
954 .ok()
955 .filter(|v| !v.is_empty())
956 .ok_or_else(|| {
957 io::Error::new(
958 io::ErrorKind::InvalidInput,
959 "TRUSS_AZURE_CONTAINER is required when TRUSS_STORAGE_BACKEND=azure",
960 )
961 })?;
962 Some(Arc::new(azure::build_azure_context(
963 container,
964 allow_insecure_url_sources,
965 )?))
966 } else {
967 if env::var("TRUSS_AZURE_CONTAINER")
968 .ok()
969 .filter(|v| !v.is_empty())
970 .is_some()
971 {
972 eprintln!(
973 "truss: warning: TRUSS_AZURE_CONTAINER is set but TRUSS_STORAGE_BACKEND is not \
974 `azure`. The Azure container will be ignored. Set TRUSS_STORAGE_BACKEND=azure to \
975 enable the Azure backend."
976 );
977 }
978 None
979 };
980
981 let metrics_token = env::var("TRUSS_METRICS_TOKEN")
982 .ok()
983 .filter(|value| !value.is_empty());
984 let disable_metrics = env_flag("TRUSS_DISABLE_METRICS");
985
986 let health_cache_min_free_bytes =
987 parse_env_u64_ranged("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", 1, u64::MAX)?;
988 let health_max_memory_bytes =
989 parse_env_u64_ranged("TRUSS_HEALTH_MAX_MEMORY_BYTES", 1, u64::MAX)?;
990
991 let presets = parse_presets_from_env()?;
992
993 let shutdown_drain_secs = parse_env_u64_ranged("TRUSS_SHUTDOWN_DRAIN_SECS", 0, 300)?
994 .unwrap_or(DEFAULT_SHUTDOWN_DRAIN_SECS);
995
996 let custom_response_headers = parse_response_headers_from_env()?;
997
998 let enable_compression = !env_flag("TRUSS_DISABLE_COMPRESSION");
999 let compression_level =
1000 parse_env_u64_ranged("TRUSS_COMPRESSION_LEVEL", 0, 9)?.unwrap_or(1) as u32;
1001
1002 Ok(Self {
1003 storage_root,
1004 bearer_token,
1005 public_base_url,
1006 signed_url_key_id,
1007 signed_url_secret,
1008 signing_keys,
1009 allow_insecure_url_sources,
1010 cache_root,
1011 public_max_age_seconds,
1012 public_stale_while_revalidate_seconds,
1013 disable_accept_negotiation: env_flag("TRUSS_DISABLE_ACCEPT_NEGOTIATION"),
1014 log_handler: None,
1015 max_concurrent_transforms,
1016 transform_deadline_secs,
1017 max_input_pixels,
1018 max_upload_bytes,
1019 keep_alive_max_requests,
1020 metrics_token,
1021 disable_metrics,
1022 health_cache_min_free_bytes,
1023 health_max_memory_bytes,
1024 shutdown_drain_secs,
1025 draining: Arc::new(AtomicBool::new(false)),
1026 custom_response_headers,
1027 enable_compression,
1028 compression_level,
1029 transforms_in_flight: Arc::new(AtomicU64::new(0)),
1030 presets,
1031 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
1032 storage_timeout_secs,
1033 #[cfg(any(feature = "s3", feature = "gcs", feature = "azure"))]
1034 storage_backend,
1035 #[cfg(feature = "s3")]
1036 s3_context,
1037 #[cfg(feature = "gcs")]
1038 gcs_context,
1039 #[cfg(feature = "azure")]
1040 azure_context,
1041 })
1042 }
1043}
1044
1045pub(super) fn parse_env_u64_ranged(name: &str, min: u64, max: u64) -> io::Result<Option<u64>> {
1050 match env::var(name).ok().filter(|v| !v.is_empty()) {
1051 Some(value) => {
1052 let n: u64 = value.parse().map_err(|_| {
1053 io::Error::new(
1054 io::ErrorKind::InvalidInput,
1055 format!("{name} must be a positive integer"),
1056 )
1057 })?;
1058 if n < min || n > max {
1059 return Err(io::Error::new(
1060 io::ErrorKind::InvalidInput,
1061 format!("{name} must be between {min} and {max}"),
1062 ));
1063 }
1064 Ok(Some(n))
1065 }
1066 None => Ok(None),
1067 }
1068}
1069
1070pub(super) fn env_flag(name: &str) -> bool {
1071 env::var(name)
1072 .map(|value| {
1073 matches!(
1074 value.as_str(),
1075 "1" | "true" | "TRUE" | "yes" | "YES" | "on" | "ON"
1076 )
1077 })
1078 .unwrap_or(false)
1079}
1080
1081pub(super) fn parse_optional_env_u32(name: &str) -> io::Result<Option<u32>> {
1082 match env::var(name) {
1083 Ok(value) if !value.is_empty() => value.parse::<u32>().map(Some).map_err(|_| {
1084 io::Error::new(
1085 io::ErrorKind::InvalidInput,
1086 format!("{name} must be a non-negative integer"),
1087 )
1088 }),
1089 _ => Ok(None),
1090 }
1091}
1092
1093pub(super) fn parse_presets_from_env() -> io::Result<HashMap<String, TransformOptionsPayload>> {
1094 let (json_str, source) = match env::var("TRUSS_PRESETS_FILE")
1095 .ok()
1096 .filter(|v| !v.is_empty())
1097 {
1098 Some(path) => {
1099 let content = std::fs::read_to_string(&path).map_err(|e| {
1100 io::Error::new(
1101 io::ErrorKind::InvalidInput,
1102 format!("failed to read TRUSS_PRESETS_FILE `{path}`: {e}"),
1103 )
1104 })?;
1105 (content, format!("TRUSS_PRESETS_FILE `{path}`"))
1106 }
1107 None => match env::var("TRUSS_PRESETS").ok().filter(|v| !v.is_empty()) {
1108 Some(value) => (value, "TRUSS_PRESETS".to_string()),
1109 None => return Ok(HashMap::new()),
1110 },
1111 };
1112
1113 serde_json::from_str::<HashMap<String, TransformOptionsPayload>>(&json_str).map_err(|e| {
1114 io::Error::new(
1115 io::ErrorKind::InvalidInput,
1116 format!("{source} must be valid JSON: {e}"),
1117 )
1118 })
1119}
1120
1121fn parse_response_headers_from_env() -> io::Result<Vec<(String, String)>> {
1125 let raw = match env::var("TRUSS_RESPONSE_HEADERS")
1126 .ok()
1127 .filter(|v| !v.is_empty())
1128 {
1129 Some(value) => value,
1130 None => return Ok(Vec::new()),
1131 };
1132
1133 let map: HashMap<String, String> = serde_json::from_str(&raw).map_err(|e| {
1134 io::Error::new(
1135 io::ErrorKind::InvalidInput,
1136 format!("TRUSS_RESPONSE_HEADERS must be a JSON object: {e}"),
1137 )
1138 })?;
1139
1140 let mut headers = Vec::with_capacity(map.len());
1141 for (name, value) in map {
1142 validate_header_name(&name)?;
1143 reject_denied_header(&name)?;
1144 validate_header_value(&name, &value)?;
1145 headers.push((name, value));
1146 }
1147 headers.sort_by(|a, b| a.0.cmp(&b.0));
1149 Ok(headers)
1150}
1151
/// Checks that `name` is a non-empty HTTP token: ASCII letters, digits, or
/// one of the permitted punctuation characters. Anything else (spaces,
/// colons, control bytes, non-ASCII) is an `InvalidInput` error.
fn validate_header_name(name: &str) -> io::Result<()> {
    if name.is_empty() {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "TRUSS_RESPONSE_HEADERS: header name must not be empty",
        ));
    }
    let is_token_byte = |b: u8| b.is_ascii_alphanumeric() || b"!#$%&'*+-.^_`|~".contains(&b);
    if name.bytes().all(is_token_byte) {
        Ok(())
    } else {
        Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("TRUSS_RESPONSE_HEADERS: invalid character in header name `{name}`"),
        ))
    }
}
1191
/// Checks that a header value contains only horizontal tab or visible ASCII
/// plus space (bytes 0x20..=0x7E); control bytes and non-ASCII are rejected
/// with an `InvalidInput` error naming the offending header.
fn validate_header_value(name: &str, value: &str) -> io::Result<()> {
    let offending = value
        .bytes()
        .find(|&b| b != b'\t' && !(0x20..=0x7E).contains(&b));
    if offending.is_none() {
        Ok(())
    } else {
        Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("TRUSS_RESPONSE_HEADERS: invalid character in value for header `{name}`"),
        ))
    }
}
1205
/// Rejects (case-insensitively) headers that must never be overridden by
/// configuration: message-framing headers and hop-by-hop headers, which
/// would corrupt responses or confuse proxies if injected.
fn reject_denied_header(name: &str) -> io::Result<()> {
    const DENIED: &[&str] = &[
        "content-length",
        "transfer-encoding",
        "content-encoding",
        "content-type",
        "connection",
        "host",
        "upgrade",
        "proxy-connection",
        "keep-alive",
        "te",
        "trailer",
    ];
    if DENIED.iter().any(|denied| name.eq_ignore_ascii_case(denied)) {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!(
                "TRUSS_RESPONSE_HEADERS: header `{name}` is not allowed (framing/hop-by-hop header)"
            ),
        ));
    }
    Ok(())
}
1234
1235pub(super) fn validate_public_base_url(value: String) -> io::Result<String> {
1236 let parsed = Url::parse(&value).map_err(|error| {
1237 io::Error::new(
1238 io::ErrorKind::InvalidInput,
1239 format!("TRUSS_PUBLIC_BASE_URL must be a valid URL: {error}"),
1240 )
1241 })?;
1242
1243 match parsed.scheme() {
1244 "http" | "https" => Ok(parsed.to_string()),
1245 _ => Err(io::Error::new(
1246 io::ErrorKind::InvalidInput,
1247 "TRUSS_PUBLIC_BASE_URL must use http or https",
1248 )),
1249 }
1250}
1251
// Unit tests for defaults and environment parsing.
//
// Tests that mutate the process environment are marked `#[serial]` (via the
// `serial_test` crate) because the environment is global mutable state;
// `set_var`/`remove_var` sit in `unsafe` blocks for newer toolchains where
// they are unsafe fns.
#[cfg(test)]
mod tests {
    use super::*;
    use serial_test::serial;

    #[test]
    fn keep_alive_default() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert_eq!(config.keep_alive_max_requests, 100);
    }

    #[test]
    #[serial]
    fn parse_keep_alive_env_valid() {
        unsafe { env::set_var("TRUSS_KEEP_ALIVE_MAX_REQUESTS", "500") };
        let result = parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000);
        unsafe { env::remove_var("TRUSS_KEEP_ALIVE_MAX_REQUESTS") };
        assert_eq!(result.unwrap(), Some(500));
    }

    #[test]
    #[serial]
    fn parse_keep_alive_env_zero_rejected() {
        unsafe { env::set_var("TRUSS_KEEP_ALIVE_MAX_REQUESTS", "0") };
        let result = parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000);
        unsafe { env::remove_var("TRUSS_KEEP_ALIVE_MAX_REQUESTS") };
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_keep_alive_env_over_max_rejected() {
        unsafe { env::set_var("TRUSS_KEEP_ALIVE_MAX_REQUESTS", "100001") };
        let result = parse_env_u64_ranged("TRUSS_KEEP_ALIVE_MAX_REQUESTS", 1, 100_000);
        unsafe { env::remove_var("TRUSS_KEEP_ALIVE_MAX_REQUESTS") };
        assert!(result.is_err());
    }

    // Health-check threshold parsing.

    #[test]
    fn health_thresholds_default_none() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.health_cache_min_free_bytes.is_none());
        assert!(config.health_max_memory_bytes.is_none());
    }

    #[test]
    #[serial]
    fn parse_health_cache_min_free_bytes_valid() {
        unsafe { env::set_var("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", "1073741824") };
        let result = parse_env_u64_ranged("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", 1, u64::MAX);
        unsafe { env::remove_var("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES") };
        assert_eq!(result.unwrap(), Some(1_073_741_824));
    }

    #[test]
    #[serial]
    fn parse_health_max_memory_bytes_valid() {
        unsafe { env::set_var("TRUSS_HEALTH_MAX_MEMORY_BYTES", "536870912") };
        let result = parse_env_u64_ranged("TRUSS_HEALTH_MAX_MEMORY_BYTES", 1, u64::MAX);
        unsafe { env::remove_var("TRUSS_HEALTH_MAX_MEMORY_BYTES") };
        assert_eq!(result.unwrap(), Some(536_870_912));
    }

    #[test]
    #[serial]
    fn parse_health_threshold_zero_rejected() {
        unsafe { env::set_var("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", "0") };
        let result = parse_env_u64_ranged("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES", 1, u64::MAX);
        unsafe { env::remove_var("TRUSS_HEALTH_CACHE_MIN_FREE_BYTES") };
        assert!(result.is_err());
    }

    // Shutdown/drain configuration.

    #[test]
    fn shutdown_drain_secs_default() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert_eq!(config.shutdown_drain_secs, DEFAULT_SHUTDOWN_DRAIN_SECS);
    }

    #[test]
    fn draining_default_false() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(!config.draining.load(std::sync::atomic::Ordering::Relaxed));
    }

    #[test]
    #[serial]
    fn parse_shutdown_drain_secs_valid() {
        unsafe { env::set_var("TRUSS_SHUTDOWN_DRAIN_SECS", "30") };
        let result = parse_env_u64_ranged("TRUSS_SHUTDOWN_DRAIN_SECS", 0, 300);
        unsafe { env::remove_var("TRUSS_SHUTDOWN_DRAIN_SECS") };
        assert_eq!(result.unwrap(), Some(30));
    }

    #[test]
    #[serial]
    fn parse_shutdown_drain_secs_over_max_rejected() {
        unsafe { env::set_var("TRUSS_SHUTDOWN_DRAIN_SECS", "301") };
        let result = parse_env_u64_ranged("TRUSS_SHUTDOWN_DRAIN_SECS", 0, 300);
        unsafe { env::remove_var("TRUSS_SHUTDOWN_DRAIN_SECS") };
        assert!(result.is_err());
    }

    // Custom response-header parsing and validation.

    #[test]
    fn custom_response_headers_default_empty() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.custom_response_headers.is_empty());
    }

    #[test]
    #[serial]
    fn parse_response_headers_valid_json() {
        unsafe {
            env::set_var(
                "TRUSS_RESPONSE_HEADERS",
                r#"{"CDN-Cache-Control":"max-age=3600","X-Custom":"value"}"#,
            )
        };
        let result = parse_response_headers_from_env();
        unsafe { env::remove_var("TRUSS_RESPONSE_HEADERS") };
        let headers = result.unwrap();
        assert_eq!(headers.len(), 2);
        // Output is sorted by header name for determinism.
        assert_eq!(headers[0].0, "CDN-Cache-Control");
        assert_eq!(headers[0].1, "max-age=3600");
        assert_eq!(headers[1].0, "X-Custom");
        assert_eq!(headers[1].1, "value");
    }

    #[test]
    #[serial]
    fn parse_response_headers_invalid_json() {
        unsafe { env::set_var("TRUSS_RESPONSE_HEADERS", "not json") };
        let result = parse_response_headers_from_env();
        unsafe { env::remove_var("TRUSS_RESPONSE_HEADERS") };
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_response_headers_empty_name_rejected() {
        unsafe { env::set_var("TRUSS_RESPONSE_HEADERS", r#"{"":"value"}"#) };
        let result = parse_response_headers_from_env();
        unsafe { env::remove_var("TRUSS_RESPONSE_HEADERS") };
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_response_headers_invalid_name_character() {
        unsafe { env::set_var("TRUSS_RESPONSE_HEADERS", r#"{"Bad Header":"value"}"#) };
        let result = parse_response_headers_from_env();
        unsafe { env::remove_var("TRUSS_RESPONSE_HEADERS") };
        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn parse_response_headers_invalid_value_character() {
        unsafe { env::set_var("TRUSS_RESPONSE_HEADERS", "{\"X-Bad\":\"val\\u0000ue\"}") };
        let result = parse_response_headers_from_env();
        unsafe { env::remove_var("TRUSS_RESPONSE_HEADERS") };
        assert!(result.is_err());
    }

    #[test]
    fn validate_header_name_valid() {
        assert!(super::validate_header_name("Cache-Control").is_ok());
        assert!(super::validate_header_name("X-Custom-Header").is_ok());
        assert!(super::validate_header_name("CDN-Cache-Control").is_ok());
    }

    #[test]
    fn validate_header_name_rejects_space() {
        assert!(super::validate_header_name("Bad Header").is_err());
    }

    #[test]
    fn validate_header_name_rejects_empty() {
        assert!(super::validate_header_name("").is_err());
    }

    #[test]
    fn validate_header_value_valid() {
        assert!(super::validate_header_value("X", "normal value").is_ok());
        assert!(super::validate_header_value("X", "max-age=3600, public").is_ok());
    }

    #[test]
    fn validate_header_value_rejects_null() {
        assert!(super::validate_header_value("X", "bad\x00value").is_err());
    }

    // Compression defaults.

    #[test]
    fn compression_enabled_by_default() {
        let config = ServerConfig::new(PathBuf::from("."), None);
        assert!(config.enable_compression);
    }
}
1468}