feldera_types/config.rs
//! Controller configuration.
//!
//! This module defines the controller configuration structure. The leaves of
//! this structure are individual transport-specific and data-format-specific
//! endpoint configs. We represent these configs as opaque JSON values, so
//! that the entire configuration tree can be deserialized from a JSON file.

use crate::transport::adhoc::AdHocInputConfig;
use crate::transport::clock::ClockConfig;
use crate::transport::datagen::DatagenInputConfig;
use crate::transport::delta_table::{DeltaTableReaderConfig, DeltaTableWriterConfig};
use crate::transport::file::{FileInputConfig, FileOutputConfig};
use crate::transport::http::HttpInputConfig;
use crate::transport::iceberg::IcebergReaderConfig;
use crate::transport::kafka::{KafkaInputConfig, KafkaOutputConfig};
use crate::transport::nexmark::NexmarkInputConfig;
use crate::transport::postgres::{PostgresReaderConfig, PostgresWriterConfig};
use crate::transport::pubsub::PubSubInputConfig;
use crate::transport::redis::RedisOutputConfig;
use crate::transport::s3::S3InputConfig;
use crate::transport::url::UrlInputConfig;
use core::fmt;
use serde::de::{self, MapAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::Value as JsonValue;
use serde_yaml::Value as YamlValue;
use std::fmt::Display;
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;
use std::{borrow::Cow, cmp::max, collections::BTreeMap};
use utoipa::openapi::{ObjectBuilder, OneOfBuilder, Ref, RefOr, Schema, SchemaType};
use utoipa::ToSchema;

const DEFAULT_MAX_PARALLEL_CONNECTOR_INIT: u64 = 10;

/// Default value of `ConnectorConfig::max_queued_records`.
pub const fn default_max_queued_records() -> u64 {
    1_000_000
}

/// Default maximum batch size for connectors, in records.
///
/// If you change this, then update the comment on
/// [ConnectorConfig::max_batch_size].
pub const fn default_max_batch_size() -> u64 {
    10_000
}

pub const DEFAULT_CLOCK_RESOLUTION_USECS: u64 = 1_000_000;

/// Pipeline deployment configuration.
///
/// It represents configuration entries directly provided by the user
/// (e.g., runtime configuration) and entries derived from the schema
/// of the compiled program (e.g., connectors). Storage configuration,
/// if applicable, is set by the runner.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct PipelineConfig {
    /// Global controller configuration.
    #[serde(flatten)]
    #[schema(inline)]
    pub global: RuntimeConfig,

    /// Pipeline name.
    pub name: Option<String>,

    /// Configuration for persistent storage.
    ///
    /// If `global.storage` is `Some(_)`, this field must be set to some
    /// [`StorageConfig`]. If `global.storage` is `None`, the pipeline ignores
    /// this field.
    #[serde(default)]
    pub storage_config: Option<StorageConfig>,

    /// Input endpoint configuration.
    pub inputs: BTreeMap<Cow<'static, str>, InputEndpointConfig>,

    /// Output endpoint configuration.
    #[serde(default)]
    pub outputs: BTreeMap<Cow<'static, str>, OutputEndpointConfig>,
}

impl PipelineConfig {
    pub fn max_parallel_connector_init(&self) -> u64 {
        max(
            self.global
                .max_parallel_connector_init
                .unwrap_or(DEFAULT_MAX_PARALLEL_CONNECTOR_INIT),
            1,
        )
    }

    pub fn with_storage(self, storage: Option<(StorageConfig, StorageOptions)>) -> Self {
        let (storage_config, storage_options) = storage.unzip();
        Self {
            global: RuntimeConfig {
                storage: storage_options,
                ..self.global
            },
            storage_config,
            ..self
        }
    }

    pub fn storage(&self) -> Option<(&StorageConfig, &StorageOptions)> {
        let storage_options = self.global.storage.as_ref();
        let storage_config = self.storage_config.as_ref();
        storage_config.zip(storage_options)
    }
}
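
// Illustrative sketch (module and test names are ours, not part of the
// original test suite): how `max_parallel_connector_init` resolves unset and
// out-of-range values, per the `max(..., 1)` logic above.
#[cfg(test)]
mod pipeline_config_test {
    use super::{PipelineConfig, RuntimeConfig};
    use std::collections::BTreeMap;

    #[test]
    fn parallel_connector_init_floor() {
        let mut config = PipelineConfig {
            global: RuntimeConfig::default(),
            name: None,
            storage_config: None,
            inputs: BTreeMap::new(),
            outputs: BTreeMap::new(),
        };
        // Unset: falls back to the default of 10.
        assert_eq!(config.max_parallel_connector_init(), 10);
        // An explicit 0 is raised to the floor of 1.
        config.global.max_parallel_connector_init = Some(0);
        assert_eq!(config.max_parallel_connector_init(), 1);
    }
}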

/// Configuration for persistent storage in a [`PipelineConfig`].
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct StorageConfig {
    /// A directory to keep pipeline state, as a path on the filesystem of the
    /// machine or container where the pipeline will run.
    ///
    /// When storage is enabled, this directory stores the data for
    /// [StorageBackendConfig::Default].
    ///
    /// When fault tolerance is enabled, this directory stores checkpoints and
    /// the log.
    pub path: String,

    /// How to cache access to storage in this pipeline.
    #[serde(default)]
    pub cache: StorageCacheConfig,
}

impl StorageConfig {
    pub fn path(&self) -> &Path {
        Path::new(&self.path)
    }
}

/// How to cache access to storage within a Feldera pipeline.
#[derive(Copy, Clone, Default, Deserialize, Serialize, Debug, PartialEq, Eq, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum StorageCacheConfig {
    /// Use the operating system's page cache as the primary storage cache.
    ///
    /// This is the default because it currently performs better than
    /// `FelderaCache`.
    #[default]
    PageCache,

    /// Use Feldera's internal cache implementation.
    ///
    /// This is under development. It will become the default when its
    /// performance exceeds that of `PageCache`.
    FelderaCache,
}

impl StorageCacheConfig {
    #[cfg(unix)]
    pub fn to_custom_open_flags(&self) -> i32 {
        match self {
            StorageCacheConfig::PageCache => (),
            StorageCacheConfig::FelderaCache => {
                #[cfg(target_os = "linux")]
                return libc::O_DIRECT;
            }
        }
        0
    }
}

/// Storage configuration for a pipeline.
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct StorageOptions {
    /// How to connect to the underlying storage.
    pub backend: StorageBackendConfig,

    /// For a batch of data maintained as part of a persistent index during a
    /// pipeline run, the minimum estimated number of bytes to write it to
    /// storage.
    ///
    /// This is provided for debugging and fine-tuning and should ordinarily be
    /// left unset.
    ///
    /// A value of 0 will write even empty batches to storage, and nonzero
    /// values provide a threshold. `usize::MAX` would effectively disable
    /// storage for such batches. The default is 1,048,576 (1 MiB).
    pub min_storage_bytes: Option<usize>,

    /// For a batch of data passed through the pipeline during a single step,
    /// the minimum estimated number of bytes to write it to storage.
    ///
    /// This is provided for debugging and fine-tuning and should ordinarily be
    /// left unset. A value of 0 will write even empty batches to storage, and
    /// nonzero values provide a threshold. `usize::MAX`, the default,
    /// effectively disables storage for such batches. If it is set to another
    /// value, it should ordinarily be greater than or equal to
    /// `min_storage_bytes`.
    pub min_step_storage_bytes: Option<usize>,

    /// The form of compression to use in data batches.
    ///
    /// Compression has a CPU cost, but it can take better advantage of limited
    /// NVMe and network bandwidth, which means that it can increase overall
    /// performance.
    pub compression: StorageCompression,

    /// The maximum size of the in-memory storage cache, in MiB.
    ///
    /// If set, the specified cache size is spread across all the foreground and
    /// background threads. If unset, each foreground or background thread cache
    /// is limited to 256 MiB.
    pub cache_mib: Option<usize>,
}

/// Backend storage configuration.
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(tag = "name", content = "config", rename_all = "snake_case")]
pub enum StorageBackendConfig {
    /// Use the default storage configuration.
    ///
    /// This currently uses the local file system.
    #[default]
    Default,

    /// Use the local file system.
    ///
    /// This uses ordinary system file operations.
    File(FileBackendConfig),

    /// Object storage.
    Object(ObjectStorageConfig),
}

impl Display for StorageBackendConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            StorageBackendConfig::Default => write!(f, "default"),
            StorageBackendConfig::File(_) => write!(f, "file"),
            StorageBackendConfig::Object(_) => write!(f, "object"),
        }
    }
}

/// Storage compression algorithm.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum StorageCompression {
    /// Use Feldera's default compression algorithm.
    ///
    /// The default may change as Feldera's performance is tuned and new
    /// algorithms are introduced.
    #[default]
    Default,

    /// Do not compress.
    None,

    /// Use [Snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) compression.
    Snappy,
}

#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct ObjectStorageConfig {
    /// URL.
    ///
    /// The following URL schemes are supported:
    ///
    /// * S3:
    ///   - `s3://<bucket>/<path>`
    ///   - `s3a://<bucket>/<path>`
    ///   - `https://s3.<region>.amazonaws.com/<bucket>`
    ///   - `https://<bucket>.s3.<region>.amazonaws.com`
    ///   - `https://ACCOUNT_ID.r2.cloudflarestorage.com/bucket`
    /// * Google Cloud Storage:
    ///   - `gs://<bucket>/<path>`
    /// * Microsoft Azure Blob Storage:
    ///   - `abfs[s]://<container>/<path>` (according to [fsspec](https://github.com/fsspec/adlfs))
    ///   - `abfs[s]://<file_system>@<account_name>.dfs.core.windows.net/<path>`
    ///   - `abfs[s]://<file_system>@<account_name>.dfs.fabric.microsoft.com/<path>`
    ///   - `az://<container>/<path>` (according to [fsspec](https://github.com/fsspec/adlfs))
    ///   - `adl://<container>/<path>` (according to [fsspec](https://github.com/fsspec/adlfs))
    ///   - `azure://<container>/<path>` (custom)
    ///   - `https://<account>.dfs.core.windows.net`
    ///   - `https://<account>.blob.core.windows.net`
    ///   - `https://<account>.blob.core.windows.net/<container>`
    ///   - `https://<account>.dfs.fabric.microsoft.com`
    ///   - `https://<account>.dfs.fabric.microsoft.com/<container>`
    ///   - `https://<account>.blob.fabric.microsoft.com`
    ///   - `https://<account>.blob.fabric.microsoft.com/<container>`
    ///
    /// Settings derived from the URL will override other settings.
    pub url: String,

    /// Additional options as key-value pairs.
    ///
    /// The following keys are supported:
    ///
    /// * S3:
    ///   - `access_key_id`: AWS Access Key.
    ///   - `secret_access_key`: AWS Secret Access Key.
    ///   - `region`: Region.
    ///   - `default_region`: Default region.
    ///   - `endpoint`: Custom endpoint for communicating with S3,
    ///     e.g. `https://localhost:4566` for testing against a localstack
    ///     instance.
    ///   - `token`: Token to use for requests (passed to underlying provider).
    ///   - [Other keys](https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html#variants).
    /// * Google Cloud Storage:
    ///   - `service_account`: Path to the service account file.
    ///   - `service_account_key`: The serialized service account key.
    ///   - `google_application_credentials`: Application credentials path.
    ///   - [Other keys](https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html).
    /// * Microsoft Azure Blob Storage:
    ///   - `access_key`: Azure Access Key.
    ///   - `container_name`: Azure Container Name.
    ///   - `account`: Azure Account.
    ///   - `bearer_token_authorization`: Static bearer token for authorizing requests.
    ///   - `client_id`: Client ID for use in client secret or Kubernetes federated credential flow.
    ///   - `client_secret`: Client secret for use in client secret flow.
    ///   - `tenant_id`: Tenant ID for use in client secret or Kubernetes federated credential flow.
    ///   - `endpoint`: Override the endpoint for communicating with blob storage.
    ///   - [Other keys](https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html#variants).
    ///
    /// Options set through the URL take precedence over those set with these
    /// options.
    #[serde(flatten)]
    pub other_options: BTreeMap<String, String>,
}

/// Configuration for local file system access.
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct FileBackendConfig {
    /// Whether to use background threads for file I/O.
    ///
    /// Background threads should improve performance, but they can reduce
    /// performance if too few cores are available. This is provided for
    /// debugging and fine-tuning and should ordinarily be left unset.
    pub async_threads: Option<bool>,

    /// Per-I/O operation sleep duration, in milliseconds.
    ///
    /// This is for simulating slow storage devices. Do not use this in
    /// production.
    pub ioop_delay: Option<u64>,
}

/// Global pipeline configuration settings. This is the publicly
/// exposed type for users to configure pipelines.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct RuntimeConfig {
    /// Number of DBSP worker threads.
    ///
    /// Each DBSP "foreground" worker thread is paired with a "background"
    /// thread for LSM merging, making the total number of threads twice the
    /// specified number.
    ///
    /// The typical sweet spot for the number of workers is between 4 and 16.
    /// Each worker increases overall memory consumption for data structures
    /// used during a step.
    pub workers: u16,

    /// Storage configuration.
    ///
    /// - If this is `None`, the default, the pipeline's state is kept in
    ///   in-memory data structures. This is useful if the pipeline's state
    ///   will fit in memory and if the pipeline is ephemeral and does not need
    ///   to be recovered after a restart. The pipeline will most likely run
    ///   faster since it does not need to access storage.
    ///
    /// - If set, the pipeline's state is kept on storage. This allows the
    ///   pipeline to work with state that will not fit into memory. It also
    ///   allows the state to be checkpointed and recovered across restarts.
    #[serde(deserialize_with = "deserialize_storage_options")]
    pub storage: Option<StorageOptions>,

    /// Fault tolerance configuration.
    #[serde(deserialize_with = "deserialize_fault_tolerance")]
    pub fault_tolerance: FtConfig,

    /// Enable CPU profiler.
    ///
    /// The default value is `true`.
    pub cpu_profiler: bool,

    /// Enable pipeline tracing.
    pub tracing: bool,

    /// Jaeger tracing endpoint to send tracing information to.
    pub tracing_endpoint_jaeger: String,

    /// Minimal input batch size.
    ///
    /// The controller delays pushing input records to the circuit until at
    /// least `min_batch_size_records` records have been received (total
    /// across all endpoints) or `max_buffering_delay_usecs` microseconds
    /// have passed since at least one input record has been buffered.
    /// Defaults to 0.
    pub min_batch_size_records: u64,

    /// Maximal delay in microseconds to wait for `min_batch_size_records` to
    /// get buffered by the controller. Defaults to 0.
    pub max_buffering_delay_usecs: u64,

    /// Resource reservations and limits. This is enforced
    /// only in Feldera Cloud.
    pub resources: ResourceConfig,

    /// Real-time clock resolution in microseconds.
    ///
    /// This parameter controls the execution of queries that use the `NOW()` function. The output of such
    /// queries depends on the real-time clock and can change over time without any external
    /// inputs. The pipeline will update the clock value and trigger incremental recomputation
    /// at most once every `clock_resolution_usecs` microseconds.
    ///
    /// It is set to 1 second (1,000,000 microseconds) by default.
    ///
    /// Set to `null` to disable periodic clock updates.
    pub clock_resolution_usecs: Option<u64>,

    /// Optionally, a list of CPU numbers for CPUs to which the pipeline may pin
    /// its worker threads. Specify at least twice as many CPU numbers as
    /// workers. CPUs are generally numbered starting from 0. The pipeline
    /// might not be able to honor CPU pinning requests.
    ///
    /// CPU pinning can make pipelines run faster and perform more consistently,
    /// as long as different pipelines running on the same machine are pinned to
    /// different CPUs.
    pub pin_cpus: Vec<usize>,

    /// Timeout in seconds for the `Provisioning` phase of the pipeline.
    /// Setting this value overrides the default of the runner.
    pub provisioning_timeout_secs: Option<u64>,

    /// The maximum number of connectors initialized in parallel during pipeline
    /// startup.
    ///
    /// At startup, the pipeline must initialize all of its input and output connectors.
    /// Depending on the number and types of connectors, this can take a long time.
    /// To accelerate the process, multiple connectors are initialized concurrently.
    /// This option controls the maximum number of connectors that can be initialized
    /// in parallel.
    ///
    /// The default is 10.
    pub max_parallel_connector_init: Option<u64>,

    /// Specification of additional (sidecar) containers.
    pub init_containers: Option<serde_yaml::Value>,

    /// * If `true`, the suspend operation will first atomically checkpoint the pipeline before
    ///   deprovisioning the compute resources. When resuming, the pipeline will start from this
    ///   checkpoint.
    /// * If `false`, then the pipeline will be suspended without creating an additional checkpoint.
    ///   When resuming, it will pick up the latest checkpoint made by the periodic checkpointer or
    ///   by invoking the `/checkpoint` API.
    pub checkpoint_during_suspend: bool,

    /// Optional settings for tweaking Feldera internals.
    ///
    /// The available key-value pairs change from one version of Feldera to
    /// another, so users should not depend on particular settings being
    /// available, or on their behavior.
    pub dev_tweaks: BTreeMap<String, serde_json::Value>,
}
464
465/// Accepts "true" and "false" and converts them to the new format.
466fn deserialize_storage_options<'de, D>(deserializer: D) -> Result<Option<StorageOptions>, D::Error>
467where
468 D: Deserializer<'de>,
469{
470 struct BoolOrStruct;
471
472 impl<'de> Visitor<'de> for BoolOrStruct {
473 type Value = Option<StorageOptions>;
474
475 fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
476 formatter.write_str("boolean or StorageOptions")
477 }
478
479 fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
480 where
481 E: de::Error,
482 {
483 match v {
484 false => Ok(None),
485 true => Ok(Some(StorageOptions::default())),
486 }
487 }
488
489 fn visit_unit<E>(self) -> Result<Self::Value, E>
490 where
491 E: de::Error,
492 {
493 Ok(None)
494 }
495
496 fn visit_none<E>(self) -> Result<Self::Value, E>
497 where
498 E: de::Error,
499 {
500 Ok(None)
501 }
502
503 fn visit_map<M>(self, map: M) -> Result<Option<StorageOptions>, M::Error>
504 where
505 M: MapAccess<'de>,
506 {
507 Deserialize::deserialize(de::value::MapAccessDeserializer::new(map)).map(Some)
508 }
509 }
510
511 deserializer.deserialize_any(BoolOrStruct)
512}
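
// Illustrative sketch (module name ours, not part of the original suite) of
// the inputs accepted by `deserialize_storage_options`: the legacy booleans
// map to `None`/`Some(default)`, and a map deserializes as ordinary
// `StorageOptions`.
#[cfg(test)]
mod storage_options_test {
    use super::{deserialize_storage_options, StorageCompression, StorageOptions};
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Wrapper {
        #[serde(deserialize_with = "deserialize_storage_options")]
        storage: Option<StorageOptions>,
    }

    #[test]
    fn bool_or_struct() {
        // Legacy `false` disables storage.
        let w: Wrapper = serde_json::from_str(r#"{"storage": false}"#).unwrap();
        assert!(w.storage.is_none());

        // Legacy `true` enables storage with default options.
        let w: Wrapper = serde_json::from_str(r#"{"storage": true}"#).unwrap();
        assert_eq!(w.storage, Some(StorageOptions::default()));

        // A map deserializes as `StorageOptions`.
        let w: Wrapper =
            serde_json::from_str(r#"{"storage": {"compression": "snappy"}}"#).unwrap();
        assert_eq!(w.storage.unwrap().compression, StorageCompression::Snappy);
    }
}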

/// Accepts the very old `'initial_state'` and `'latest_checkpoint'` values as
/// enabling fault tolerance.
///
/// Accepts `null` as disabling fault tolerance.
///
/// Otherwise, deserializes [FtConfig] in the way that one might otherwise
/// expect.
fn deserialize_fault_tolerance<'de, D>(deserializer: D) -> Result<FtConfig, D::Error>
where
    D: Deserializer<'de>,
{
    struct StringOrStruct;

    impl<'de> Visitor<'de> for StringOrStruct {
        type Value = FtConfig;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("none or FtConfig or 'initial_state' or 'latest_checkpoint'")
        }

        fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            match v {
                "initial_state" | "latest_checkpoint" => Ok(FtConfig {
                    model: Some(FtModel::default()),
                    ..FtConfig::default()
                }),
                _ => Err(de::Error::invalid_value(de::Unexpected::Str(v), &self)),
            }
        }

        fn visit_unit<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(FtConfig::default())
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(FtConfig::default())
        }

        fn visit_map<M>(self, map: M) -> Result<FtConfig, M::Error>
        where
            M: MapAccess<'de>,
        {
            Deserialize::deserialize(de::value::MapAccessDeserializer::new(map))
        }
    }

    deserializer.deserialize_any(StringOrStruct)
}

impl Default for RuntimeConfig {
    fn default() -> Self {
        Self {
            workers: 8,
            storage: Some(StorageOptions::default()),
            fault_tolerance: FtConfig::default(),
            cpu_profiler: true,
            // We discovered that the jaeger crate can use up gigabytes of RAM,
            // so it's not harmless to keep it on by default.
            tracing: false,
            tracing_endpoint_jaeger: "127.0.0.1:6831".to_string(),
            min_batch_size_records: 0,
            max_buffering_delay_usecs: 0,
            resources: ResourceConfig::default(),
            clock_resolution_usecs: Some(DEFAULT_CLOCK_RESOLUTION_USECS),
            pin_cpus: Vec::new(),
            provisioning_timeout_secs: None,
            max_parallel_connector_init: None,
            init_containers: None,
            checkpoint_during_suspend: true,
            dev_tweaks: BTreeMap::default(),
        }
    }
}
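
// Illustrative sketch (module name ours): because `RuntimeConfig` carries a
// container-level `#[serde(default)]`, a partial config is enough, and
// unspecified fields take the defaults above.
#[cfg(test)]
mod runtime_config_test {
    use super::RuntimeConfig;

    #[test]
    fn partial_yaml() {
        let config: RuntimeConfig = serde_yaml::from_str("workers: 4").unwrap();
        assert_eq!(config.workers, 4);
        // Defaults fill in the rest, e.g. storage is enabled by default.
        assert!(config.storage.is_some());
        assert!(config.cpu_profiler);
    }
}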

/// Fault-tolerance configuration.
///
/// The default [FtConfig] (via [FtConfig::default]) disables fault tolerance,
/// which is the configuration that one gets if [RuntimeConfig] omits fault
/// tolerance configuration.
///
/// The default value for [FtConfig::model] enables fault tolerance, as
/// `Some(FtModel::default())`. This is the configuration that one gets if
/// [RuntimeConfig] includes a fault tolerance configuration but does not
/// specify a particular model.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub struct FtConfig {
    /// Fault tolerance model to use.
    #[serde(with = "none_as_string")]
    #[serde(default = "default_model")]
    #[schema(
        schema_with = none_as_string_schema::<FtModel>,
    )]
    pub model: Option<FtModel>,

    /// Interval between automatic checkpoints, in seconds.
    ///
    /// The default is 60 seconds. Values less than 1 or greater than 3600 will
    /// be forced into that range.
    #[serde(default = "default_checkpoint_interval_secs")]
    pub checkpoint_interval_secs: Option<u64>,
}

fn default_model() -> Option<FtModel> {
    Some(FtModel::default())
}

pub fn default_checkpoint_interval_secs() -> Option<u64> {
    Some(60)
}

impl Default for FtConfig {
    fn default() -> Self {
        Self {
            model: None,
            checkpoint_interval_secs: default_checkpoint_interval_secs(),
        }
    }
}

#[cfg(test)]
mod test {
    use super::deserialize_fault_tolerance;
    use crate::config::{FtConfig, FtModel};
    use serde::{Deserialize, Serialize};

    #[test]
    fn ft_config() {
        #[derive(Serialize, Deserialize, Default, PartialEq, Eq, Debug)]
        #[serde(default)]
        struct Wrapper {
            #[serde(deserialize_with = "deserialize_fault_tolerance")]
            config: FtConfig,
        }

        // Omitting FtConfig, or specifying null, or specifying model "none",
        // disables fault tolerance.
        for s in [
            "{}",
            r#"{"config": null}"#,
            r#"{"config": {"model": "none"}}"#,
        ] {
            let config: Wrapper = serde_json::from_str(s).unwrap();
            assert_eq!(
                config,
                Wrapper {
                    config: FtConfig {
                        model: None,
                        checkpoint_interval_secs: Some(60)
                    }
                }
            );
        }

        // Serializing disabled FT produces explicit "none" form.
        let s = serde_json::to_string(&Wrapper {
            config: FtConfig::default(),
        })
        .unwrap();
        assert!(s.contains("\"none\""));

        // `{}` for FtConfig, or `{...}` with `model` omitted, enables fault
        // tolerance.
        for s in [r#"{"config": {}}"#, r#"{"checkpoint_interval_secs": 60}"#] {
            assert_eq!(
                serde_json::from_str::<FtConfig>(s).unwrap(),
                FtConfig {
                    model: Some(FtModel::default()),
                    checkpoint_interval_secs: Some(60)
                }
            );
        }

        // `"checkpoint_interval_secs": null` disables periodic checkpointing.
        assert_eq!(
            serde_json::from_str::<FtConfig>(r#"{"checkpoint_interval_secs": null}"#).unwrap(),
            FtConfig {
                model: Some(FtModel::default()),
                checkpoint_interval_secs: None
            }
        );
    }
}

impl FtConfig {
    pub fn is_enabled(&self) -> bool {
        self.model.is_some()
    }

    /// Returns the checkpoint interval if fault tolerance is enabled, and
    /// otherwise `None`.
    pub fn checkpoint_interval(&self) -> Option<Duration> {
        if self.is_enabled() {
            self.checkpoint_interval_secs
                .map(|interval| Duration::from_secs(interval.clamp(1, 3600)))
        } else {
            None
        }
    }
}
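
// Illustrative sketch (module name ours) of `FtConfig::checkpoint_interval`:
// the interval is clamped to [1, 3600] seconds and is `None` whenever fault
// tolerance itself is disabled.
#[cfg(test)]
mod checkpoint_interval_test {
    use super::{FtConfig, FtModel};
    use std::time::Duration;

    #[test]
    fn clamping() {
        // An out-of-range interval is clamped to the 3600-second ceiling.
        let ft = FtConfig {
            model: Some(FtModel::default()),
            checkpoint_interval_secs: Some(7200),
        };
        assert_eq!(ft.checkpoint_interval(), Some(Duration::from_secs(3600)));

        // With fault tolerance disabled, the interval is ignored.
        let ft = FtConfig {
            model: None,
            checkpoint_interval_secs: Some(60),
        };
        assert_eq!(ft.checkpoint_interval(), None);
    }
}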

/// Serde implementation for de/serializing a string into `Option<T>`, where
/// `"none"` indicates `None` and any other string indicates `Some`.
///
/// This could be extended to handle non-strings by adding more forwarding
/// `visit_*` methods to the Visitor implementation. I don't see a way to write
/// them automatically.
mod none_as_string {
    use std::marker::PhantomData;

    use serde::de::{Deserialize, Deserializer, IntoDeserializer, Visitor};
    use serde::ser::{Serialize, Serializer};

    pub(super) fn serialize<S, T>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
        T: Serialize,
    {
        match value.as_ref() {
            Some(value) => value.serialize(serializer),
            None => "none".serialize(serializer),
        }
    }

    struct NoneAsString<T>(PhantomData<fn() -> T>);

    impl<'de, T> Visitor<'de> for NoneAsString<T>
    where
        T: Deserialize<'de>,
    {
        type Value = Option<T>;

        fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
            formatter.write_str("string")
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            Ok(None)
        }

        fn visit_str<E>(self, value: &str) -> Result<Option<T>, E>
        where
            E: serde::de::Error,
        {
            if value.eq_ignore_ascii_case("none") {
                Ok(None)
            } else {
                Ok(Some(T::deserialize(value.into_deserializer())?))
            }
        }
    }

    pub(super) fn deserialize<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
    where
        D: Deserializer<'de>,
        T: Deserialize<'de>,
    {
        deserializer.deserialize_str(NoneAsString(PhantomData))
    }
}
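
// Illustrative round-trip sketch for `none_as_string` (module and wrapper
// names ours): `None` serializes as the string "none", and "none" in any
// case deserializes back to `None`.
#[cfg(test)]
mod none_as_string_test {
    use super::FtModel;
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct Wrapper {
        #[serde(with = "super::none_as_string")]
        model: Option<FtModel>,
    }

    #[test]
    fn round_trip() {
        // `None` becomes the string "none" on the wire.
        let s = serde_json::to_string(&Wrapper { model: None }).unwrap();
        assert_eq!(s, r#"{"model":"none"}"#);

        // The comparison is case-insensitive.
        let w: Wrapper = serde_json::from_str(r#"{"model":"NONE"}"#).unwrap();
        assert_eq!(w, Wrapper { model: None });

        // Any other string is forwarded to `T`'s deserializer.
        let w: Wrapper = serde_json::from_str(r#"{"model":"exactly_once"}"#).unwrap();
        assert_eq!(
            w,
            Wrapper {
                model: Some(FtModel::ExactlyOnce)
            }
        );
    }
}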

/// Generates an OpenAPI schema for an `Option<T>` field serialized with `none_as_string`.
/// The schema is a `oneOf` with a reference to `T`'s schema and a `"none"` string enum.
fn none_as_string_schema<'a, T: ToSchema<'a> + Default + Serialize>() -> Schema {
    Schema::OneOf(
        OneOfBuilder::new()
            .item(RefOr::Ref(Ref::new(format!(
                "#/components/schemas/{}",
                T::schema().0
            ))))
            .item(
                ObjectBuilder::new()
                    .schema_type(SchemaType::String)
                    .enum_values(Some(vec!["none"])),
            )
            .default(Some(
                serde_json::to_value(T::default()).expect("Failed to serialize default value"),
            ))
            .build(),
    )
}

/// Fault tolerance model.
///
/// The ordering is significant: we consider [Self::ExactlyOnce] to be a "higher
/// level" of fault tolerance than [Self::AtLeastOnce].
#[derive(
    Debug, Copy, Clone, Default, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, ToSchema,
)]
#[serde(rename_all = "snake_case")]
pub enum FtModel {
    /// Each record is output at least once. Crashes may duplicate output, but
    /// no input or output is dropped.
    AtLeastOnce,

    /// Each record is output exactly once. Crashes do not drop or duplicate
    /// input or output.
    #[default]
    ExactlyOnce,
}

impl FtModel {
    pub fn option_as_str(value: Option<FtModel>) -> &'static str {
        value.map_or("no", |model| model.as_str())
    }

    pub fn as_str(&self) -> &'static str {
        match self {
            FtModel::AtLeastOnce => "at_least_once",
            FtModel::ExactlyOnce => "exactly_once",
        }
    }
}

pub struct FtModelUnknown;

impl FromStr for FtModel {
    type Err = FtModelUnknown;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_ascii_lowercase().as_str() {
            "exactly_once" => Ok(Self::ExactlyOnce),
            "at_least_once" => Ok(Self::AtLeastOnce),
            _ => Err(FtModelUnknown),
        }
    }
}
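
// Illustrative sketch (module name ours): `FtModel` parses case-insensitively
// and rejects unknown names.
#[cfg(test)]
mod ft_model_test {
    use super::FtModel;
    use std::str::FromStr;

    #[test]
    fn from_str() {
        assert_eq!(
            FtModel::from_str("EXACTLY_ONCE").ok(),
            Some(FtModel::ExactlyOnce)
        );
        assert_eq!(
            FtModel::from_str("at_least_once").ok(),
            Some(FtModel::AtLeastOnce)
        );
        assert!(FtModel::from_str("bogus").is_err());
    }
}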

/// Describes an input connector configuration
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct InputEndpointConfig {
    /// The name of the input stream of the circuit that this endpoint is
    /// connected to.
    pub stream: Cow<'static, str>,

    /// Connector configuration.
    #[serde(flatten)]
    pub connector_config: ConnectorConfig,
}

/// Deserialize the `start_after` property of a connector configuration.
///
/// It requires a non-standard deserialization because we want to accept
/// either a string or an array of strings.
fn deserialize_start_after<'de, D>(deserializer: D) -> Result<Option<Vec<String>>, D::Error>
where
    D: Deserializer<'de>,
{
    let value = Option::<JsonValue>::deserialize(deserializer)?;
    match value {
        Some(JsonValue::String(s)) => Ok(Some(vec![s])),
        Some(JsonValue::Array(arr)) => {
            let vec = arr
                .into_iter()
                .map(|item| {
                    item.as_str()
                        .map(|s| s.to_string())
                        .ok_or_else(|| serde::de::Error::custom("invalid 'start_after' property: expected a string, an array of strings, or null"))
                })
                .collect::<Result<Vec<String>, _>>()?;
            Ok(Some(vec))
        }
        Some(JsonValue::Null) | None => Ok(None),
        _ => Err(serde::de::Error::custom(
            "invalid 'start_after' property: expected a string, an array of strings, or null",
        )),
    }
}
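
// Illustrative sketch (module and wrapper names ours) of the `start_after`
// forms accepted by `deserialize_start_after`: a single string, an array of
// strings, or null.
#[cfg(test)]
mod start_after_test {
    use super::deserialize_start_after;
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Wrapper {
        #[serde(deserialize_with = "deserialize_start_after")]
        start_after: Option<Vec<String>>,
    }

    #[test]
    fn string_or_array() {
        // A bare string becomes a one-element list.
        let w: Wrapper = serde_json::from_str(r#"{"start_after": "raw"}"#).unwrap();
        assert_eq!(w.start_after, Some(vec!["raw".to_string()]));

        // An array of strings is taken as-is.
        let w: Wrapper = serde_json::from_str(r#"{"start_after": ["a", "b"]}"#).unwrap();
        assert_eq!(
            w.start_after,
            Some(vec!["a".to_string(), "b".to_string()])
        );

        // Null means no ordering constraint.
        let w: Wrapper = serde_json::from_str(r#"{"start_after": null}"#).unwrap();
        assert_eq!(w.start_after, None);
    }
}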

/// A data connector's configuration
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct ConnectorConfig {
    /// Transport endpoint configuration.
    pub transport: TransportConfig,

    /// Parser configuration.
    pub format: Option<FormatConfig>,

    /// Name of the index that the connector is attached to.
    ///
    /// This property is valid for output connectors only. It is used with data
    /// transports and formats that expect output updates in the form of key/value
    /// pairs, where the key typically represents a unique id associated with the
    /// table or view.
    ///
    /// To support such output formats, an output connector can be attached to an
    /// index created using the SQL CREATE INDEX statement. An index of a table
    /// or view contains the same updates as the table or view itself, indexed by
    /// one or more key columns.
    ///
    /// See individual connector documentation for details on how they work
    /// with indexes.
    pub index: Option<String>,

    /// Output buffer configuration.
    #[serde(flatten)]
    pub output_buffer_config: OutputBufferConfig,

    /// Maximum batch size, in records.
    ///
    /// This is the maximum number of records to process in one batch through
    /// the circuit. The time and space cost of processing a batch is
    /// asymptotically superlinear in the size of the batch, but very small
    /// batches are less efficient due to constant factors.
    ///
    /// This should usually be less than `max_queued_records`, to give the
    /// connector a round-trip time to restart and refill the buffer while
    /// batches are being processed.
    ///
    /// Some input adapters might not honor this setting.
    ///
    /// The default is 10,000.
    #[serde(default = "default_max_batch_size")]
    pub max_batch_size: u64,

    /// Backpressure threshold.
    ///
    /// Maximal number of records queued by the endpoint before the endpoint
    /// is paused by the backpressure mechanism.
    ///
    /// For input endpoints, this setting bounds the number of records that have
    /// been received from the input transport but haven't yet been consumed by
    /// the circuit, because the circuit is still busy processing previous
    /// inputs.
    ///
    /// For output endpoints, this setting bounds the number of records that have
    /// been produced by the circuit but not yet sent via the output transport endpoint
    /// nor stored in the output buffer (see `enable_output_buffer`).
    ///
    /// Note that this is not a hard bound: there can be a small delay between
    /// the time the backpressure mechanism is triggered and the time the
    /// endpoint is paused, during which more data may be queued.
    ///
    /// The default is 1 million.
    #[serde(default = "default_max_queued_records")]
    pub max_queued_records: u64,

    /// Create connector in paused state.
    ///
    /// The default is `false`.
    #[serde(default)]
    pub paused: bool,

    /// Arbitrary user-defined text labels associated with the connector.
    ///
    /// These labels can be used in conjunction with the `start_after` property
    /// to control the start order of connectors.
    #[serde(default)]
    pub labels: Vec<String>,

    /// Start the connector after all connectors with the specified labels.
    ///
    /// This property is used to control the start order of connectors.
    /// The connector will not start until all connectors with the specified
    /// labels have finished processing all inputs.
    #[serde(deserialize_with = "deserialize_start_after")]
    #[serde(default)]
    pub start_after: Option<Vec<String>>,
}

#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct OutputBufferConfig {
    /// Enable output buffering.
    ///
    /// The output buffering mechanism allows decoupling the rate at which the pipeline
    /// pushes changes to the output transport from the rate of input changes.
    ///
    /// By default, output updates produced by the pipeline are pushed directly to
    /// the output transport. Some destinations may prefer to receive updates in fewer,
    /// bigger batches. For instance, when writing Parquet files, producing
    /// one bigger file every few minutes is usually better than creating
    /// small files every few milliseconds.
    ///
    /// To achieve such input/output decoupling, users can enable output buffering by
    /// setting the `enable_output_buffer` flag to `true`. When buffering is enabled, output
    /// updates produced by the pipeline are consolidated in an internal buffer and are
    /// pushed to the output transport when one of several conditions is satisfied:
    ///
    /// * data has been accumulated in the buffer for more than `max_output_buffer_time_millis`
    ///   milliseconds.
    /// * buffer size exceeds `max_output_buffer_size_records` records.
    ///
    /// This flag is `false` by default.
    // TODO: on-demand output triggered via the API.
    pub enable_output_buffer: bool,

    /// Maximum time in milliseconds that data is kept in the output buffer.
    ///
    /// By default, data is kept in the buffer indefinitely until one of
    /// the other output conditions is satisfied. When this option is
    /// set, the buffer will be flushed at most every
    /// `max_output_buffer_time_millis` milliseconds.
    ///
    /// NOTE: this configuration option requires the `enable_output_buffer` flag
    /// to be set.
    pub max_output_buffer_time_millis: usize,

    /// Maximum number of updates to be kept in the output buffer.
    ///
    /// This parameter bounds the maximal size of the buffer.
    /// Note that the size of the buffer is not always equal to the
    /// total number of updates output by the pipeline. Updates to the
    /// same record can overwrite or cancel previous updates.
    ///
    /// By default, the buffer can grow indefinitely until one of
    /// the other output conditions is satisfied.
    ///
    /// NOTE: this configuration option requires the `enable_output_buffer` flag
    /// to be set.
    pub max_output_buffer_size_records: usize,
}

impl Default for OutputBufferConfig {
    fn default() -> Self {
        Self {
            enable_output_buffer: false,
            max_output_buffer_size_records: usize::MAX,
            max_output_buffer_time_millis: usize::MAX,
        }
    }
}

impl OutputBufferConfig {
    pub fn validate(&self) -> Result<(), String> {
        if self.enable_output_buffer
            && self.max_output_buffer_size_records == Self::default().max_output_buffer_size_records
            && self.max_output_buffer_time_millis == Self::default().max_output_buffer_time_millis
        {
            return Err(
                "when the 'enable_output_buffer' flag is set, at least one of the 'max_output_buffer_size_records' and 'max_output_buffer_time_millis' settings must be specified"
                    .to_string(),
            );
        }

        Ok(())
    }
}
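
// Illustrative sketch (module name ours) of `OutputBufferConfig::validate`:
// enabling the buffer without bounding it in time or size is rejected.
#[cfg(test)]
mod output_buffer_test {
    use super::OutputBufferConfig;

    #[test]
    fn requires_a_bound() {
        // Buffer enabled, but both bounds left at their unbounded defaults.
        let config = OutputBufferConfig {
            enable_output_buffer: true,
            ..OutputBufferConfig::default()
        };
        assert!(config.validate().is_err());

        // Setting either bound makes the configuration valid.
        let config = OutputBufferConfig {
            enable_output_buffer: true,
            max_output_buffer_time_millis: 1000,
            ..OutputBufferConfig::default()
        };
        assert!(config.validate().is_ok());
    }
}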

/// Describes an output connector configuration
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct OutputEndpointConfig {
    /// The name of the output stream of the circuit that this endpoint is
    /// connected to.
    pub stream: Cow<'static, str>,

    /// Connector configuration.
    #[serde(flatten)]
    pub connector_config: ConnectorConfig,
}

/// Transport-specific endpoint configuration passed to
/// `crate::OutputTransport::new_endpoint`
/// and `crate::InputTransport::new_endpoint`.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(tag = "name", content = "config", rename_all = "snake_case")]
pub enum TransportConfig {
    FileInput(FileInputConfig),
    FileOutput(FileOutputConfig),
    KafkaInput(KafkaInputConfig),
    KafkaOutput(KafkaOutputConfig),
    PubSubInput(PubSubInputConfig),
    UrlInput(UrlInputConfig),
    S3Input(S3InputConfig),
    DeltaTableInput(DeltaTableReaderConfig),
    DeltaTableOutput(DeltaTableWriterConfig),
    RedisOutput(RedisOutputConfig),
    // Boxed to prevent Rust from complaining about the large size difference
    // between enum variants.
    IcebergInput(Box<IcebergReaderConfig>),
    PostgresInput(PostgresReaderConfig),
    PostgresOutput(PostgresWriterConfig),
    Datagen(DatagenInputConfig),
    Nexmark(NexmarkInputConfig),
    /// Direct HTTP input: cannot be instantiated through the API.
    HttpInput(HttpInputConfig),
    /// Direct HTTP output: cannot be instantiated through the API.
    HttpOutput,
    /// Ad hoc input: cannot be instantiated through the API.
    AdHocInput(AdHocInputConfig),
    ClockInput(ClockConfig),
}

impl TransportConfig {
    pub fn name(&self) -> String {
        match self {
            TransportConfig::FileInput(_) => "file_input".to_string(),
            TransportConfig::FileOutput(_) => "file_output".to_string(),
            TransportConfig::KafkaInput(_) => "kafka_input".to_string(),
            TransportConfig::KafkaOutput(_) => "kafka_output".to_string(),
            TransportConfig::PubSubInput(_) => "pub_sub_input".to_string(),
            TransportConfig::UrlInput(_) => "url_input".to_string(),
            TransportConfig::S3Input(_) => "s3_input".to_string(),
            TransportConfig::DeltaTableInput(_) => "delta_table_input".to_string(),
            TransportConfig::DeltaTableOutput(_) => "delta_table_output".to_string(),
            TransportConfig::IcebergInput(_) => "iceberg_input".to_string(),
            TransportConfig::PostgresInput(_) => "postgres_input".to_string(),
            TransportConfig::PostgresOutput(_) => "postgres_output".to_string(),
            TransportConfig::Datagen(_) => "datagen".to_string(),
            TransportConfig::Nexmark(_) => "nexmark".to_string(),
            TransportConfig::HttpInput(_) => "http_input".to_string(),
            TransportConfig::HttpOutput => "http_output".to_string(),
            TransportConfig::AdHocInput(_) => "adhoc_input".to_string(),
            TransportConfig::RedisOutput(_) => "redis_output".to_string(),
            TransportConfig::ClockInput(_) => "clock".to_string(),
        }
    }
}

/// Data format specification used to parse raw data received from the
/// endpoint or to encode data sent to the endpoint.
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize, ToSchema)]
pub struct FormatConfig {
    /// Format name, e.g., "csv", "json", "bincode", etc.
    pub name: Cow<'static, str>,

    /// Format-specific parser or encoder configuration.
    #[serde(default)]
    #[schema(value_type = Object)]
    pub config: YamlValue,
}

#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct ResourceConfig {
    /// The minimum number of CPU cores to reserve
    /// for an instance of this pipeline
    pub cpu_cores_min: Option<u64>,

    /// The maximum number of CPU cores to reserve
    /// for an instance of this pipeline
    pub cpu_cores_max: Option<u64>,

    /// The minimum memory in megabytes to reserve
    /// for an instance of this pipeline
    pub memory_mb_min: Option<u64>,

    /// The maximum memory in megabytes to reserve
    /// for an instance of this pipeline
    pub memory_mb_max: Option<u64>,

    /// The total storage in megabytes to reserve
    /// for an instance of this pipeline
    pub storage_mb_max: Option<u64>,

    /// Storage class to use for an instance of this pipeline.
    /// The class determines storage performance such as IOPS and throughput.
    pub storage_class: Option<String>,
}