// feldera_types/config.rs

//! Controller configuration.
//!
//! This module defines the controller configuration structure.  The leaves of
//! this structure are individual transport-specific and data-format-specific
//! endpoint configs.  We represent these configs as opaque JSON values, so
//! that the entire configuration tree can be deserialized from a JSON file.

use crate::program_schema::ProgramSchema;
use crate::secret_resolver::default_secrets_directory;
use crate::transport::adhoc::AdHocInputConfig;
use crate::transport::clock::ClockConfig;
use crate::transport::datagen::DatagenInputConfig;
use crate::transport::delta_table::{DeltaTableReaderConfig, DeltaTableWriterConfig};
use crate::transport::file::{FileInputConfig, FileOutputConfig};
use crate::transport::http::HttpInputConfig;
use crate::transport::iceberg::IcebergReaderConfig;
use crate::transport::kafka::{KafkaInputConfig, KafkaOutputConfig};
use crate::transport::nats::NatsInputConfig;
use crate::transport::nexmark::NexmarkInputConfig;
use crate::transport::postgres::{PostgresReaderConfig, PostgresWriterConfig};
use crate::transport::pubsub::PubSubInputConfig;
use crate::transport::redis::RedisOutputConfig;
use crate::transport::s3::S3InputConfig;
use crate::transport::url::UrlInputConfig;
use core::fmt;
use feldera_ir::{MirNode, MirNodeId};
use serde::de::{self, MapAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize};
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use std::fmt::Display;
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;
use std::{borrow::Cow, cmp::max, collections::BTreeMap};
use utoipa::ToSchema;
use utoipa::openapi::{ObjectBuilder, OneOfBuilder, Ref, RefOr, Schema, SchemaType};

const DEFAULT_MAX_PARALLEL_CONNECTOR_INIT: u64 = 10;

/// Default value of `ConnectorConfig::max_queued_records`.
pub const fn default_max_queued_records() -> u64 {
    1_000_000
}

/// Default maximum batch size for connectors, in records.
///
/// If you change this then update the comment on
/// [ConnectorConfig::max_batch_size].
pub const fn default_max_batch_size() -> u64 {
    10_000
}

pub const DEFAULT_CLOCK_RESOLUTION_USECS: u64 = 1_000_000;

/// Program information included in the pipeline configuration.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq, Eq)]
pub struct ProgramIr {
    /// The MIR of the program.
    pub mir: HashMap<MirNodeId, MirNode>,
    /// Program schema.
    pub program_schema: ProgramSchema,
}

/// Pipeline deployment configuration.
/// It represents configuration entries directly provided by the user
/// (e.g., runtime configuration) and entries derived from the schema
/// of the compiled program (e.g., connectors). Storage configuration,
/// if applicable, is set by the runner.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, PartialEq)]
pub struct PipelineConfig {
    /// Global controller configuration.
    #[serde(flatten)]
    #[schema(inline)]
    pub global: RuntimeConfig,

    /// Configuration for multihost pipelines.
    ///
    /// The presence of this field indicates that the pipeline is running in
    /// multihost mode.  In the pod with ordinal 0, this triggers starting the
    /// coordinator process.  In all pods, this tells the pipeline process to
    /// await a connection from the coordinator instead of initializing the
    /// pipeline immediately.
    pub multihost: Option<MultihostConfig>,

    /// Unique system-generated name of the pipeline (format: `pipeline-<uuid>`).
    /// It is unique across all tenants and cannot be changed.
    ///
    /// The `<uuid>` is also used in the naming of various resources that back the pipeline,
    /// and as such this name is useful to find/identify corresponding resources.
    pub name: Option<String>,

    /// Name given by the tenant to the pipeline. It is only unique within the same tenant, and can
    /// be changed by the tenant when the pipeline is stopped.
    ///
    /// Given a specific tenant, it can be used to find/identify a specific pipeline of theirs.
    pub given_name: Option<String>,

    /// Configuration for persistent storage.
    ///
    /// If `global.storage` is `Some(_)`, this field must be set to some
    /// [`StorageConfig`].  If `global.storage` is `None`, the pipeline ignores
    /// this field.
    #[serde(default)]
    pub storage_config: Option<StorageConfig>,

    /// Directory containing values of secrets.
    ///
    /// If this is not set, a default directory is used.
    pub secrets_dir: Option<String>,

    /// Input endpoint configuration.
    #[serde(default)]
    pub inputs: BTreeMap<Cow<'static, str>, InputEndpointConfig>,

    /// Output endpoint configuration.
    #[serde(default)]
    pub outputs: BTreeMap<Cow<'static, str>, OutputEndpointConfig>,

    /// Program information.
    #[serde(default)]
    pub program_ir: Option<ProgramIr>,
}

impl PipelineConfig {
    pub fn max_parallel_connector_init(&self) -> u64 {
        max(
            self.global
                .max_parallel_connector_init
                .unwrap_or(DEFAULT_MAX_PARALLEL_CONNECTOR_INIT),
            1,
        )
    }

    pub fn with_storage(self, storage: Option<(StorageConfig, StorageOptions)>) -> Self {
        let (storage_config, storage_options) = storage.unzip();
        Self {
            global: RuntimeConfig {
                storage: storage_options,
                ..self.global
            },
            storage_config,
            ..self
        }
    }

    pub fn storage(&self) -> Option<(&StorageConfig, &StorageOptions)> {
        let storage_options = self.global.storage.as_ref();
        let storage_config = self.storage_config.as_ref();
        storage_config.zip(storage_options)
    }

    /// Returns `self.secrets_dir`, or the default secrets directory if it isn't
    /// set.
    pub fn secrets_dir(&self) -> &Path {
        match &self.secrets_dir {
            Some(dir) => Path::new(dir.as_str()),
            None => default_secrets_directory(),
        }
    }

    /// Abbreviated config that can be printed in the log on pipeline startup.
    pub fn display_summary(&self) -> String {
        // TODO: we may want to further abbreviate connector config.
        let summary = serde_json::json!({
            "name": self.name,
            "given_name": self.given_name,
            "global": self.global,
            "storage_config": self.storage_config,
            "secrets_dir": self.secrets_dir,
            "inputs": self.inputs,
            "outputs": self.outputs
        });

        serde_json::to_string_pretty(&summary).unwrap_or_else(|_| "{}".to_string())
    }
}

/// A subset of fields in `PipelineConfig` that are generated by the compiler.
/// These fields are shipped to the pipeline by the compilation server along with
/// the program binary.
// Note: An alternative would be to embed these fields in the program binary itself
// as static strings. This would work well for program IR, but it would require recompiling
// the program anytime a connector config changes, whereas today connector changes
// do not require recompilation.
#[derive(Default, Deserialize, Serialize, Eq, PartialEq, Debug, Clone)]
pub struct PipelineConfigProgramInfo {
    /// Input endpoint configuration.
    pub inputs: BTreeMap<Cow<'static, str>, InputEndpointConfig>,

    /// Output endpoint configuration.
    #[serde(default)]
    pub outputs: BTreeMap<Cow<'static, str>, OutputEndpointConfig>,

    /// Program information.
    #[serde(default)]
    pub program_ir: Option<ProgramIr>,
}

/// Configuration for a multihost Feldera pipeline.
///
/// This configuration is primarily for the coordinator.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct MultihostConfig {
    /// Number of hosts to launch.
    ///
    /// For the configuration to be truly multihost, this should be at least 2.
    /// A value of 1 still runs the multihost coordinator but it only
    /// coordinates a single host.
    pub hosts: usize,
}

impl Default for MultihostConfig {
    fn default() -> Self {
        Self { hosts: 1 }
    }
}

/// Configuration for persistent storage in a [`PipelineConfig`].
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct StorageConfig {
    /// A directory to keep pipeline state, as a path on the filesystem of the
    /// machine or container where the pipeline will run.
    ///
    /// When storage is enabled, this directory stores the data for
    /// [StorageBackendConfig::Default].
    ///
    /// When fault tolerance is enabled, this directory stores checkpoints and
    /// the log.
    pub path: String,

    /// How to cache access to storage in this pipeline.
    #[serde(default)]
    pub cache: StorageCacheConfig,
}

impl StorageConfig {
    pub fn path(&self) -> &Path {
        Path::new(&self.path)
    }
}

/// How to cache access to storage within a Feldera pipeline.
#[derive(Copy, Clone, Default, Deserialize, Serialize, Debug, PartialEq, Eq, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum StorageCacheConfig {
    /// Use the operating system's page cache as the primary storage cache.
    ///
    /// This is the default because it currently performs better than
    /// `FelderaCache`.
    #[default]
    PageCache,

    /// Use Feldera's internal cache implementation.
    ///
    /// This is under development. It will become the default when its
    /// performance exceeds that of `PageCache`.
    FelderaCache,
}

impl StorageCacheConfig {
    #[cfg(unix)]
    pub fn to_custom_open_flags(&self) -> i32 {
        match self {
            StorageCacheConfig::PageCache => (),
            StorageCacheConfig::FelderaCache => {
                #[cfg(target_os = "linux")]
                return libc::O_DIRECT;
            }
        }
        0
    }
}

/// Storage configuration for a pipeline.
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct StorageOptions {
    /// How to connect to the underlying storage.
    pub backend: StorageBackendConfig,

    /// For a batch of data maintained as part of a persistent index during a
    /// pipeline run, the minimum estimated number of bytes to write it to
    /// storage.
    ///
    /// This is provided for debugging and fine-tuning and should ordinarily be
    /// left unset.
    ///
    /// A value of 0 will write even empty batches to storage, and nonzero
    /// values provide a threshold.  `usize::MAX` would effectively disable
    /// storage for such batches.  The default is 10,485,760 (10 MiB).
    pub min_storage_bytes: Option<usize>,

    /// For a batch of data passed through the pipeline during a single step,
    /// the minimum estimated number of bytes to write it to storage.
    ///
    /// This is provided for debugging and fine-tuning and should ordinarily be
    /// left unset.  A value of 0 will write even empty batches to storage, and
    /// nonzero values provide a threshold.  `usize::MAX`, the default,
    /// effectively disables storage for such batches.  If it is set to another
    /// value, it should ordinarily be greater than or equal to
    /// `min_storage_bytes`.
    pub min_step_storage_bytes: Option<usize>,

    /// The form of compression to use in data batches.
    ///
    /// Compression has a CPU cost but it can take better advantage of limited
    /// NVMe and network bandwidth, which means that it can increase overall
    /// performance.
    pub compression: StorageCompression,

    /// The maximum size of the in-memory storage cache, in MiB.
    ///
    /// If set, the specified cache size is spread across all the foreground and
    /// background threads. If unset, each foreground or background thread cache
    /// is limited to 256 MiB.
    pub cache_mib: Option<usize>,
}

/// Backend storage configuration.
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(tag = "name", content = "config", rename_all = "snake_case")]
pub enum StorageBackendConfig {
    /// Use the default storage configuration.
    ///
    /// This currently uses the local file system.
    #[default]
    Default,

    /// Use the local file system.
    ///
    /// This uses ordinary system file operations.
    File(Box<FileBackendConfig>),

    /// Object storage.
    Object(ObjectStorageConfig),
}

impl Display for StorageBackendConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            StorageBackendConfig::Default => write!(f, "default"),
            StorageBackendConfig::File(_) => write!(f, "file"),
            StorageBackendConfig::Object(_) => write!(f, "object"),
        }
    }
}

/// Storage compression algorithm.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum StorageCompression {
    /// Use Feldera's default compression algorithm.
    ///
    /// The default may change as Feldera's performance is tuned and new
    /// algorithms are introduced.
    #[default]
    Default,

    /// Do not compress.
    None,

    /// Use [Snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) compression.
    Snappy,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum StartFromCheckpoint {
    Latest,
    Uuid(uuid::Uuid),
}

impl ToSchema<'_> for StartFromCheckpoint {
    fn schema() -> (
        &'static str,
        utoipa::openapi::RefOr<utoipa::openapi::schema::Schema>,
    ) {
        (
            "StartFromCheckpoint",
            utoipa::openapi::RefOr::T(Schema::OneOf(
                OneOfBuilder::new()
                    .item(
                        ObjectBuilder::new()
                            .schema_type(SchemaType::String)
                            .enum_values(Some(["latest"].into_iter()))
                            .build(),
                    )
                    .item(
                        ObjectBuilder::new()
                            .schema_type(SchemaType::String)
                            .format(Some(utoipa::openapi::SchemaFormat::KnownFormat(
                                utoipa::openapi::KnownFormat::Uuid,
                            )))
                            .build(),
                    )
                    .nullable(true)
                    .build(),
            )),
        )
    }
}

impl<'de> Deserialize<'de> for StartFromCheckpoint {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct StartFromCheckpointVisitor;

        impl<'de> Visitor<'de> for StartFromCheckpointVisitor {
            type Value = StartFromCheckpoint;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("a UUID string or the string \"latest\"")
            }

            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                if value == "latest" {
                    Ok(StartFromCheckpoint::Latest)
                } else {
                    uuid::Uuid::parse_str(value)
                        .map(StartFromCheckpoint::Uuid)
                        .map_err(|_| E::invalid_value(serde::de::Unexpected::Str(value), &self))
                }
            }
        }

        deserializer.deserialize_str(StartFromCheckpointVisitor)
    }
}

impl Serialize for StartFromCheckpoint {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            StartFromCheckpoint::Latest => serializer.serialize_str("latest"),
            StartFromCheckpoint::Uuid(uuid) => serializer.serialize_str(&uuid.to_string()),
        }
    }
}
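
// Illustrative test sketch (not part of the original source): exercises the
// custom serde implementations above, where `StartFromCheckpoint` round-trips
// as either the string "latest" or a UUID string. The module name is ours.
#[cfg(test)]
mod start_from_checkpoint_tests {
    use super::StartFromCheckpoint;

    #[test]
    fn parses_latest_and_uuid() {
        // "latest" maps to the dedicated variant.
        assert_eq!(
            serde_json::from_str::<StartFromCheckpoint>(r#""latest""#).unwrap(),
            StartFromCheckpoint::Latest
        );
        // Any valid UUID string maps to `Uuid`.
        let uuid = "550e8400-e29b-41d4-a716-446655440000";
        assert_eq!(
            serde_json::from_str::<StartFromCheckpoint>(&format!("\"{uuid}\"")).unwrap(),
            StartFromCheckpoint::Uuid(uuid::Uuid::parse_str(uuid).unwrap())
        );
        // Anything else is rejected by the visitor.
        assert!(serde_json::from_str::<StartFromCheckpoint>(r#""oops""#).is_err());
    }
}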

#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct SyncConfig {
    /// The endpoint URL for the storage service.
    ///
    /// This is typically required for custom or local S3-compatible storage providers like MinIO.
    /// Example: `http://localhost:9000`
    ///
    /// Relevant rclone config key: [`endpoint`](https://rclone.org/s3/#s3-endpoint)
    pub endpoint: Option<String>,

    /// The name of the storage bucket.
    ///
    /// This may include a path to a folder inside the bucket (e.g., `my-bucket/data`).
    pub bucket: String,

    /// The region that this bucket is in.
    ///
    /// Leave empty for MinIO or the default region (`us-east-1` for AWS).
    pub region: Option<String>,

    /// The name of the cloud storage provider (e.g., `"AWS"`, `"Minio"`).
    ///
    /// Used for provider-specific behavior in rclone.
    /// If omitted, defaults to `"Other"`.
    ///
    /// See [rclone S3 provider documentation](https://rclone.org/s3/#s3-provider)
    pub provider: Option<String>,

    /// The access key used to authenticate with the storage provider.
    ///
    /// If not provided, rclone will fall back to environment-based credentials, such as
    /// `RCLONE_S3_ACCESS_KEY_ID`. In Kubernetes environments using IRSA (IAM Roles for Service Accounts),
    /// this can be left empty to allow automatic authentication via the pod's service account.
    pub access_key: Option<String>,

    /// The secret key used together with the access key for authentication.
    ///
    /// If not provided, rclone will fall back to environment-based credentials, such as
    /// `RCLONE_S3_SECRET_ACCESS_KEY`. In Kubernetes environments using IRSA (IAM Roles for Service Accounts),
    /// this can be left empty to allow automatic authentication via the pod's service account.
    pub secret_key: Option<String>,

    /// When set, the pipeline will try to fetch the specified checkpoint from
    /// the object store.
    ///
    /// If `fail_if_no_checkpoint` is `true`, the pipeline will fail to
    /// initialize if the fetch fails.
    pub start_from_checkpoint: Option<StartFromCheckpoint>,

    /// When true, the pipeline will fail to initialize if fetching the
    /// specified checkpoint fails (missing, download error).
    /// When false, the pipeline will start from scratch instead.
    ///
    /// False by default.
    #[schema(default = std::primitive::bool::default)]
    #[serde(default)]
    pub fail_if_no_checkpoint: bool,

    /// The number of file transfers to run in parallel.
    /// Default: 20
    pub transfers: Option<u8>,

    /// The number of checkers to run in parallel.
    /// Default: 20
    pub checkers: Option<u8>,

    /// Set to skip the post-copy check of checksums and only check file sizes.
    /// This can significantly improve throughput.
    /// Default: false
    pub ignore_checksum: Option<bool>,

    /// Number of streams to use for multi-thread downloads.
    /// Default: 10
    pub multi_thread_streams: Option<u8>,

    /// Use multi-thread download for files above this size.
    /// Format: `[size][Suffix]` (Example: 1G, 500M)
    /// Supported suffixes: k|M|G|T
    /// Default: 100M
    pub multi_thread_cutoff: Option<String>,

    /// The number of chunks of the same file that are uploaded for multipart uploads.
    /// Default: 10
    pub upload_concurrency: Option<u8>,

    /// When `true`, the pipeline starts in **standby** mode; processing doesn't
    /// start until activation (`POST /activate`).
    /// If this pipeline was previously activated and the storage has not been
    /// cleared, the pipeline activates automatically and no newer checkpoints
    /// are fetched.
    ///
    /// Standby behavior depends on `start_from_checkpoint`:
    /// - If `latest`, pipeline continuously fetches the latest available
    ///   checkpoint until activated.
    /// - If checkpoint UUID, pipeline fetches this checkpoint once and waits
    ///   in standby until activated.
    ///
    /// Default: `false`
    #[schema(default = std::primitive::bool::default)]
    #[serde(default)]
    pub standby: bool,

    /// The interval (in seconds) between each attempt to fetch the latest
    /// checkpoint from object store while in standby mode.
    ///
    /// Applies only when `start_from_checkpoint` is set to `latest`.
    ///
    /// Default: 10 seconds
    #[schema(default = default_pull_interval)]
    #[serde(default = "default_pull_interval")]
    pub pull_interval: u64,

    /// The interval (in seconds) between each push of checkpoints to object store.
    ///
    /// Default: disabled (no periodic push).
    #[serde(default)]
    pub push_interval: Option<u64>,

    /// Extra flags to pass to `rclone`.
    ///
    /// WARNING: Supplying incorrect or conflicting flags can break `rclone`.
    /// Use with caution.
    ///
    /// Refer to the docs to see the supported flags:
    /// - [Global flags](https://rclone.org/flags/)
    /// - [S3 specific flags](https://rclone.org/s3/)
    pub flags: Option<Vec<String>>,

    /// The minimum number of checkpoints to retain in object store.
    /// No checkpoints will be deleted if the total count is below this threshold.
    ///
    /// Default: 10
    #[schema(default = default_retention_min_count)]
    #[serde(default = "default_retention_min_count")]
    pub retention_min_count: u32,

    /// The minimum age (in days) a checkpoint must reach before it becomes
    /// eligible for deletion. All younger checkpoints will be preserved.
    ///
    /// Default: 30
    #[schema(default = default_retention_min_age)]
    #[serde(default = "default_retention_min_age")]
    pub retention_min_age: u32,
}

fn default_pull_interval() -> u64 {
    10
}

fn default_retention_min_count() -> u32 {
    10
}

fn default_retention_min_age() -> u32 {
    30
}

impl SyncConfig {
    pub fn validate(&self) -> Result<(), String> {
        if self.standby && self.start_from_checkpoint.is_none() {
            return Err(r#"invalid sync config: `standby` set to `true` but `start_from_checkpoint` not set.
Standby mode requires `start_from_checkpoint` to be set.
Consider setting `start_from_checkpoint` to `"latest"`."#.to_owned());
        }

        Ok(())
    }
}
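
// Illustrative test sketch (not part of the original source): demonstrates the
// `validate` contract above, where standby mode requires
// `start_from_checkpoint` to be set. The module name is ours.
#[cfg(test)]
mod sync_config_tests {
    use super::{StartFromCheckpoint, SyncConfig};

    #[test]
    fn standby_requires_checkpoint() {
        // `standby` without `start_from_checkpoint` is rejected.
        let config = SyncConfig {
            standby: true,
            ..SyncConfig::default()
        };
        assert!(config.validate().is_err());

        // Adding `start_from_checkpoint` makes the config valid again.
        let config = SyncConfig {
            standby: true,
            start_from_checkpoint: Some(StartFromCheckpoint::Latest),
            ..SyncConfig::default()
        };
        assert!(config.validate().is_ok());
    }
}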

/// Configuration for supplying a custom pipeline StatefulSet template via a Kubernetes ConfigMap.
///
/// Operators can provide a custom StatefulSet YAML that the Kubernetes runner will use when
/// creating pipeline StatefulSets for a pipeline. The custom template must be stored as the
/// value of a key in a ConfigMap in the same namespace as the pipeline; set `name` to the
/// ConfigMap name and `key` to the entry that contains the template.
///
/// Recommendations and requirements:
/// - **Start from the default template and modify it as needed.** The default template is stored
///   in a ConfigMap named `<release-name>-pipeline-template`, under the key `pipelineTemplate`, in the
///   release namespace, and should be used as a reference.
/// - The template must contain a valid Kubernetes `StatefulSet` manifest in YAML form. The
///   runner substitutes variables in the template before parsing; therefore the final YAML
///   must be syntactically valid.
/// - The runner performs simple string substitution for the following placeholders. Please ensure each
///   placeholder is placed at a location appropriate for its semantics:
///   - `{id}`: pipeline Kubernetes name (used for object names and labels)
///   - `{namespace}`: Kubernetes namespace where the pipeline runs
///   - `{pipeline_executor_image}`: container image used to run the pipeline executor
///   - `{binary_ref}`: program binary reference passed as an argument
///   - `{program_info_ref}`: program info reference passed as an argument
///   - `{pipeline_storage_path}`: mount path for persistent pipeline storage
///   - `{storage_class_name}`: storage class name to use for PVCs (if applicable)
///   - `{deployment_id}`: UUID identifying the deployment instance
///   - `{deployment_initial}`: initial desired runtime status (e.g., `provisioning`)
///   - `{bootstrap_policy}`: bootstrap policy value when applicable
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
pub struct PipelineTemplateConfig {
    /// Name of the ConfigMap containing the pipeline template.
    pub name: String,
    /// Key in the ConfigMap containing the pipeline template.
    ///
    /// If not set, defaults to `pipelineTemplate`.
    #[schema(default = default_pipeline_template_key)]
    #[serde(default = "default_pipeline_template_key")]
    pub key: String,
}

fn default_pipeline_template_key() -> String {
    "pipelineTemplate".to_string()
}

#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct ObjectStorageConfig {
    /// URL.
    ///
    /// The following URL schemes are supported:
    ///
    /// * S3:
    ///   - `s3://<bucket>/<path>`
    ///   - `s3a://<bucket>/<path>`
    ///   - `https://s3.<region>.amazonaws.com/<bucket>`
    ///   - `https://<bucket>.s3.<region>.amazonaws.com`
    ///   - `https://ACCOUNT_ID.r2.cloudflarestorage.com/bucket`
    /// * Google Cloud Storage:
    ///   - `gs://<bucket>/<path>`
    /// * Microsoft Azure Blob Storage:
    ///   - `abfs[s]://<container>/<path>` (according to [fsspec](https://github.com/fsspec/adlfs))
    ///   - `abfs[s]://<file_system>@<account_name>.dfs.core.windows.net/<path>`
    ///   - `abfs[s]://<file_system>@<account_name>.dfs.fabric.microsoft.com/<path>`
    ///   - `az://<container>/<path>` (according to [fsspec](https://github.com/fsspec/adlfs))
    ///   - `adl://<container>/<path>` (according to [fsspec](https://github.com/fsspec/adlfs))
    ///   - `azure://<container>/<path>` (custom)
    ///   - `https://<account>.dfs.core.windows.net`
    ///   - `https://<account>.blob.core.windows.net`
    ///   - `https://<account>.blob.core.windows.net/<container>`
    ///   - `https://<account>.dfs.fabric.microsoft.com`
    ///   - `https://<account>.dfs.fabric.microsoft.com/<container>`
    ///   - `https://<account>.blob.fabric.microsoft.com`
    ///   - `https://<account>.blob.fabric.microsoft.com/<container>`
    ///
    /// Settings derived from the URL will override other settings.
    pub url: String,

    /// Additional options as key-value pairs.
    ///
    /// The following keys are supported:
    ///
    /// * S3:
    ///   - `access_key_id`: AWS Access Key.
    ///   - `secret_access_key`: AWS Secret Access Key.
    ///   - `region`: Region.
    ///   - `default_region`: Default region.
    ///   - `endpoint`: Custom endpoint for communicating with S3,
    ///     e.g. `https://localhost:4566` for testing against a localstack
    ///     instance.
    ///   - `token`: Token to use for requests (passed to underlying provider).
    ///   - [Other keys](https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html#variants).
    /// * Google Cloud Storage:
    ///   - `service_account`: Path to the service account file.
    ///   - `service_account_key`: The serialized service account key.
    ///   - `google_application_credentials`: Application credentials path.
    ///   - [Other keys](https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html).
    /// * Microsoft Azure Blob Storage:
    ///   - `access_key`: Azure Access Key.
    ///   - `container_name`: Azure Container Name.
    ///   - `account`: Azure Account.
    ///   - `bearer_token_authorization`: Static bearer token for authorizing requests.
    ///   - `client_id`: Client ID for use in client secret or Kubernetes federated credential flow.
    ///   - `client_secret`: Client secret for use in client secret flow.
    ///   - `tenant_id`: Tenant ID for use in client secret or Kubernetes federated credential flow.
    ///   - `endpoint`: Override the endpoint for communicating with blob storage.
    ///   - [Other keys](https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html#variants).
    ///
    /// Options set through the URL take precedence over those set with these
    /// options.
    #[serde(flatten)]
    pub other_options: BTreeMap<String, String>,
}

/// Configuration for local file system access.
#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct FileBackendConfig {
    /// Whether to use background threads for file I/O.
    ///
    /// Background threads should improve performance, but they can reduce
    /// performance if too few cores are available. This is provided for
    /// debugging and fine-tuning and should ordinarily be left unset.
    pub async_threads: Option<bool>,

    /// Per-I/O operation sleep duration, in milliseconds.
    ///
    /// This is for simulating slow storage devices.  Do not use this in
    /// production.
    pub ioop_delay: Option<u64>,

    /// Configuration to synchronize checkpoints to object store.
    pub sync: Option<SyncConfig>,
}

/// Global pipeline configuration settings. This is the publicly
/// exposed type for users to configure pipelines.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(default)]
pub struct RuntimeConfig {
    /// Number of DBSP worker threads.
    ///
    /// Each DBSP "foreground" worker thread is paired with a "background"
    /// thread for LSM merging, making the total number of threads twice the
    /// specified number.
    ///
    /// The typical sweet spot for the number of workers is between 4 and 16.
    /// Each worker increases overall memory consumption for data structures
    /// used during a step.
    pub workers: u16,

    /// Number of DBSP hosts.
    ///
    /// The worker threads are evenly divided among the hosts.  For single-host
    /// deployments, this should be 1 (the default).
    ///
    /// Multihost pipelines are an enterprise-only preview feature.
    pub hosts: usize,

    /// Storage configuration.
    ///
    /// - If this is `None`, the default, the pipeline's state is kept in
    ///   in-memory data-structures.  This is useful if the pipeline's state
    ///   will fit in memory and if the pipeline is ephemeral and does not need
    ///   to be recovered after a restart. The pipeline will most likely run
    ///   faster since it does not need to access storage.
    ///
    /// - If set, the pipeline's state is kept on storage.  This allows the
    ///   pipeline to work with state that will not fit into memory. It also
    ///   allows the state to be checkpointed and recovered across restarts.
    #[serde(deserialize_with = "deserialize_storage_options")]
    pub storage: Option<StorageOptions>,

    /// Fault tolerance configuration.
    #[serde(deserialize_with = "deserialize_fault_tolerance")]
    pub fault_tolerance: FtConfig,

    /// Enable CPU profiler.
    ///
    /// The default value is `true`.
    pub cpu_profiler: bool,

    /// Enable pipeline tracing.
    pub tracing: bool,

    /// Jaeger tracing endpoint to send tracing information to.
    pub tracing_endpoint_jaeger: String,

    /// Minimal input batch size.
    ///
    /// The controller delays pushing input records to the circuit until at
    /// least `min_batch_size_records` records have been received (total
    /// across all endpoints) or `max_buffering_delay_usecs` microseconds
    /// have passed since at least one input record has been buffered.
    /// Defaults to 0.
    pub min_batch_size_records: u64,

    /// Maximal delay in microseconds to wait for `min_batch_size_records` to
    /// get buffered by the controller, defaults to 0.
    pub max_buffering_delay_usecs: u64,

    /// Resource reservations and limits. This is enforced
    /// only in Feldera Cloud.
    pub resources: ResourceConfig,

    /// Real-time clock resolution in microseconds.
    ///
    /// This parameter controls the execution of queries that use the `NOW()` function.  The output of such
    /// queries depends on the real-time clock and can change over time without any external
    /// inputs.  If the query uses `NOW()`, the pipeline will update the clock value and trigger incremental
    /// recomputation at most each `clock_resolution_usecs` microseconds.  If the query does not use
    /// `NOW()`, then clock value updates are suppressed and the pipeline ignores this setting.
    ///
    /// It is set to 1 second (1,000,000 microseconds) by default.
    pub clock_resolution_usecs: Option<u64>,

    /// Optionally, a list of CPU numbers for CPUs to which the pipeline may pin
    /// its worker threads.  Specify at least twice as many CPU numbers as
    /// workers.  CPUs are generally numbered starting from 0.  The pipeline
    /// might not be able to honor CPU pinning requests.
    ///
    /// CPU pinning can make pipelines run faster and perform more consistently,
    /// as long as different pipelines running on the same machine are pinned to
    /// different CPUs.
    pub pin_cpus: Vec<usize>,

    /// Timeout in seconds for the `Provisioning` phase of the pipeline.
    /// Setting this value will override the default of the runner.
    pub provisioning_timeout_secs: Option<u64>,

    /// The maximum number of connectors initialized in parallel during pipeline
    /// startup.
    ///
    /// At startup, the pipeline must initialize all of its input and output connectors.
    /// Depending on the number and types of connectors, this can take a long time.
    /// To accelerate the process, multiple connectors are initialized concurrently.
    /// This option controls the maximum number of connectors that can be initialized
    /// in parallel.
    ///
    /// The default is 10.
    pub max_parallel_connector_init: Option<u64>,

    /// Specification of additional (sidecar) containers.
    pub init_containers: Option<serde_json::Value>,

    /// Deprecated: this setting no longer has any effect.
    pub checkpoint_during_suspend: bool,

    /// Sets the number of available runtime threads for the http server.
    ///
    /// In most cases, this does not need to be set explicitly and
    /// the default is sufficient. Can be increased in case the
    /// pipeline HTTP API operations are a bottleneck.
    ///
    /// If not specified, the default is set to `workers`.
    pub http_workers: Option<u64>,

    /// Sets the number of available runtime threads for async IO tasks.
869    ///
870    /// This affects some networking and file I/O operations
    ///
    /// In most cases, this does not need to be set explicitly and
    /// the default is sufficient. Can be increased in case
    /// ingress, egress or ad-hoc queries are a bottleneck.
    ///
    /// If not specified, the default is set to `workers`.
    pub io_workers: Option<u64>,

    /// Optional settings for tweaking Feldera internals.
    ///
    /// The available key-value pairs change from one version of Feldera to
    /// another, so users should not depend on particular settings being
    /// available, or on their behavior.
    pub dev_tweaks: BTreeMap<String, serde_json::Value>,

    /// Log filtering directives.
    ///
    /// If set to a valid [tracing-subscriber] filter, this controls the log
    /// messages emitted by the pipeline process.  Otherwise, or if the filter
    /// has invalid syntax, messages at "info" severity and higher are written
    /// to the log and all others are discarded.
    ///
    /// [tracing-subscriber]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
    pub logging: Option<String>,

    /// ConfigMap containing a custom pipeline template (Enterprise only).
    ///
    /// This feature is only available in Feldera Enterprise. If set, the Kubernetes runner
    /// will read the template from the specified ConfigMap and use it instead of the default
    /// StatefulSet template for the configured pipeline.
    ///
    /// See the [`PipelineTemplateConfig`] documentation for details.
    pub pipeline_template_configmap: Option<PipelineTemplateConfig>,
}
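
// Illustrative test sketch (not part of the original source): because
// `RuntimeConfig` is marked `#[serde(default)]`, a user-supplied config only
// needs the fields it overrides. The module name is ours.
#[cfg(test)]
mod runtime_config_tests {
    use super::RuntimeConfig;

    #[test]
    fn partial_config_uses_defaults() {
        let config: RuntimeConfig =
            serde_json::from_str(r#"{"workers": 4, "tracing": true}"#).unwrap();
        // Explicitly provided fields are taken from the input...
        assert_eq!(config.workers, 4);
        assert!(config.tracing);
        // ...while everything else falls back to `RuntimeConfig::default()`.
        assert_eq!(config.hosts, 1);
    }
}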

/// Accepts "true" and "false" and converts them to the new format.
fn deserialize_storage_options<'de, D>(deserializer: D) -> Result<Option<StorageOptions>, D::Error>
where
    D: Deserializer<'de>,
{
    struct BoolOrStruct;

    impl<'de> Visitor<'de> for BoolOrStruct {
        type Value = Option<StorageOptions>;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("boolean or StorageOptions")
        }

        fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            match v {
                false => Ok(None),
                true => Ok(Some(StorageOptions::default())),
            }
        }

        fn visit_unit<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(None)
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(None)
        }

        fn visit_map<M>(self, map: M) -> Result<Option<StorageOptions>, M::Error>
        where
            M: MapAccess<'de>,
        {
            Deserialize::deserialize(de::value::MapAccessDeserializer::new(map)).map(Some)
        }
    }

    deserializer.deserialize_any(BoolOrStruct)
}
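
// Illustrative test sketch (not part of the original source): exercises the
// backward-compatible deserialization above, where `storage` may be a legacy
// boolean or a full `StorageOptions` map. The wrapper struct is ours.
#[cfg(test)]
mod storage_options_compat_tests {
    use super::StorageOptions;
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Wrapper {
        #[serde(deserialize_with = "super::deserialize_storage_options")]
        storage: Option<StorageOptions>,
    }

    #[test]
    fn bool_or_struct() {
        // Legacy booleans map to `None` / default options.
        let w: Wrapper = serde_json::from_str(r#"{"storage": false}"#).unwrap();
        assert_eq!(w.storage, None);
        let w: Wrapper = serde_json::from_str(r#"{"storage": true}"#).unwrap();
        assert_eq!(w.storage, Some(StorageOptions::default()));
        // A map deserializes as ordinary `StorageOptions`.
        let w: Wrapper = serde_json::from_str(r#"{"storage": {"compression": "snappy"}}"#).unwrap();
        assert!(w.storage.is_some());
    }
}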

/// Accepts very old 'initial_state' and 'latest_checkpoint' as enabling fault
/// tolerance.
///
/// Accepts `null` as disabling fault tolerance.
///
/// Otherwise, deserializes [FtConfig] in the way that one might otherwise
/// expect.
fn deserialize_fault_tolerance<'de, D>(deserializer: D) -> Result<FtConfig, D::Error>
where
    D: Deserializer<'de>,
{
    struct StringOrStruct;

    impl<'de> Visitor<'de> for StringOrStruct {
        type Value = FtConfig;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("none or FtConfig or 'initial_state' or 'latest_checkpoint'")
        }

        fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            match v {
                "initial_state" | "latest_checkpoint" => Ok(FtConfig {
                    model: Some(FtModel::default()),
                    ..FtConfig::default()
                }),
                _ => Err(de::Error::invalid_value(de::Unexpected::Str(v), &self)),
            }
        }

        fn visit_unit<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(FtConfig::default())
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            Ok(FtConfig::default())
        }

        fn visit_map<M>(self, map: M) -> Result<FtConfig, M::Error>
        where
            M: MapAccess<'de>,
        {
            Deserialize::deserialize(de::value::MapAccessDeserializer::new(map))
        }
    }

    deserializer.deserialize_any(StringOrStruct)
}

impl Default for RuntimeConfig {
    fn default() -> Self {
        Self {
            workers: 8,
            hosts: 1,
            storage: Some(StorageOptions::default()),
            fault_tolerance: FtConfig::default(),
            cpu_profiler: true,
            tracing: {
                // We discovered that the jaeger crate can use up gigabytes of RAM, so it's not harmless
                // to keep it on by default.
                false
            },
            tracing_endpoint_jaeger: "127.0.0.1:6831".to_string(),
            min_batch_size_records: 0,
            max_buffering_delay_usecs: 0,
            resources: ResourceConfig::default(),
            clock_resolution_usecs: { Some(DEFAULT_CLOCK_RESOLUTION_USECS) },
            pin_cpus: Vec::new(),
            provisioning_timeout_secs: None,
            max_parallel_connector_init: None,
            init_containers: None,
            checkpoint_during_suspend: true,
            io_workers: None,
            http_workers: None,
            dev_tweaks: BTreeMap::default(),
            logging: None,
            pipeline_template_configmap: None,
        }
    }
}

/// Fault-tolerance configuration.
///
/// The default [FtConfig] (via [FtConfig::default]) disables fault tolerance,
/// which is the configuration that one gets if [RuntimeConfig] omits fault
/// tolerance configuration.
///
/// The default value for [FtConfig::model] enables fault tolerance, as
/// `Some(FtModel::default())`.  This is the configuration that one gets if
/// [RuntimeConfig] includes a fault tolerance configuration but does not
/// specify a particular model.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub struct FtConfig {
    /// Fault tolerance model to use.
    #[serde(with = "none_as_string")]
    #[serde(default = "default_model")]
    #[schema(
        schema_with = none_as_string_schema::<FtModel>,
    )]
    pub model: Option<FtModel>,

    /// Interval between automatic checkpoints, in seconds.
    ///
    /// The default is 60 seconds.  Values less than 1 or greater than 3600 will
    /// be forced into that range.
    #[serde(default = "default_checkpoint_interval_secs")]
    pub checkpoint_interval_secs: Option<u64>,
}

fn default_model() -> Option<FtModel> {
    Some(FtModel::default())
}

pub fn default_checkpoint_interval_secs() -> Option<u64> {
    Some(60)
}

impl Default for FtConfig {
    fn default() -> Self {
        Self {
            model: None,
            checkpoint_interval_secs: default_checkpoint_interval_secs(),
        }
    }
}

#[cfg(test)]
mod test {
    use super::deserialize_fault_tolerance;
    use crate::config::{FtConfig, FtModel};
    use serde::{Deserialize, Serialize};

    #[test]
    fn ft_config() {
        #[derive(Serialize, Deserialize, Default, PartialEq, Eq, Debug)]
        #[serde(default)]
        struct Wrapper {
            #[serde(deserialize_with = "deserialize_fault_tolerance")]
            config: FtConfig,
        }

        // Omitting FtConfig, or specifying null, or specifying model "none", disables fault tolerance.
        for s in [
            "{}",
            r#"{"config": null}"#,
            r#"{"config": {"model": "none"}}"#,
        ] {
            let config: Wrapper = serde_json::from_str(s).unwrap();
            assert_eq!(
                config,
                Wrapper {
                    config: FtConfig {
                        model: None,
                        checkpoint_interval_secs: Some(60)
                    }
                }
            );
        }

        // Serializing disabled FT produces explicit "none" form.
        let s = serde_json::to_string(&Wrapper {
            config: FtConfig::default(),
        })
        .unwrap();
        assert!(s.contains("\"none\""));

        // `{}` for FtConfig, or `{...}` with `model` omitted, enables fault
        // tolerance.
        for s in [r#"{"config": {}}"#, r#"{"checkpoint_interval_secs": 60}"#] {
            assert_eq!(
                serde_json::from_str::<FtConfig>(s).unwrap(),
                FtConfig {
                    model: Some(FtModel::default()),
                    checkpoint_interval_secs: Some(60)
                }
            );
        }

        // `"checkpoint_interval_secs": null` disables periodic checkpointing.
        assert_eq!(
            serde_json::from_str::<FtConfig>(r#"{"checkpoint_interval_secs": null}"#).unwrap(),
            FtConfig {
                model: Some(FtModel::default()),
                checkpoint_interval_secs: None
            }
        );
    }
}

impl FtConfig {
    pub fn is_enabled(&self) -> bool {
        self.model.is_some()
    }

    /// Returns the checkpoint interval, if fault tolerance is enabled, and
    /// otherwise `None`.
    pub fn checkpoint_interval(&self) -> Option<Duration> {
        if self.is_enabled() {
            self.checkpoint_interval_secs
                .map(|interval| Duration::from_secs(interval.clamp(1, 3600)))
        } else {
            None
        }
    }
}
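
// Illustrative test sketch (not part of the original source): demonstrates the
// clamping behavior documented on `checkpoint_interval_secs`, where the
// effective interval is forced into the range 1..=3600 seconds.
#[cfg(test)]
mod checkpoint_interval_tests {
    use super::{FtConfig, FtModel};
    use std::time::Duration;

    #[test]
    fn interval_is_clamped() {
        let config = FtConfig {
            model: Some(FtModel::default()),
            checkpoint_interval_secs: Some(100_000),
        };
        assert_eq!(config.checkpoint_interval(), Some(Duration::from_secs(3600)));

        // With fault tolerance disabled, there is no checkpoint interval.
        assert_eq!(FtConfig::default().checkpoint_interval(), None);
    }
}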

/// Serde implementation for de/serializing a string into `Option<T>` where
/// `"none"` indicates `None` and any other string indicates `Some`.
///
/// This could be extended to handle non-strings by adding more forwarding
/// `visit_*` methods to the Visitor implementation.  I don't see a way to write
/// them automatically.
mod none_as_string {
    use std::marker::PhantomData;

    use serde::de::{Deserialize, Deserializer, IntoDeserializer, Visitor};
    use serde::ser::{Serialize, Serializer};

    pub(super) fn serialize<S, T>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
        T: Serialize,
    {
        match value.as_ref() {
            Some(value) => value.serialize(serializer),
            None => "none".serialize(serializer),
        }
    }

    struct NoneAsString<T>(PhantomData<fn() -> T>);

    impl<'de, T> Visitor<'de> for NoneAsString<T>
    where
        T: Deserialize<'de>,
    {
        type Value = Option<T>;

        fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
            formatter.write_str("string")
        }

        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            Ok(None)
        }

        fn visit_str<E>(self, value: &str) -> Result<Option<T>, E>
        where
            E: serde::de::Error,
        {
            if value.eq_ignore_ascii_case("none") {
                Ok(None)
            } else {
                Ok(Some(T::deserialize(value.into_deserializer())?))
            }
        }
    }

    pub(super) fn deserialize<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
    where
        D: Deserializer<'de>,
        T: Deserialize<'de>,
    {
        deserializer.deserialize_str(NoneAsString(PhantomData))
    }
}

/// Generates an OpenAPI schema for an `Option<T>` field serialized with `none_as_string`.
/// The schema is a `oneOf` with a reference to `T`'s schema and a `"none"` string enum.
fn none_as_string_schema<'a, T: ToSchema<'a> + Default + Serialize>() -> Schema {
    Schema::OneOf(
        OneOfBuilder::new()
            .item(RefOr::Ref(Ref::new(format!(
                "#/components/schemas/{}",
                T::schema().0
            ))))
            .item(
                ObjectBuilder::new()
                    .schema_type(SchemaType::String)
                    .enum_values(Some(vec!["none"])),
            )
            .default(Some(
                serde_json::to_value(T::default()).expect("Failed to serialize default value"),
            ))
            .build(),
    )
}

/// Fault tolerance model.
///
/// The ordering is significant: we consider [Self::ExactlyOnce] to be a "higher
/// level" of fault tolerance than [Self::AtLeastOnce].
#[derive(
    Debug, Copy, Clone, Default, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, ToSchema,
)]
#[serde(rename_all = "snake_case")]
pub enum FtModel {
    /// Each record is output at least once.  Crashes may duplicate output, but
    /// no input or output is dropped.
    AtLeastOnce,

    /// Each record is output exactly once.  Crashes do not drop or duplicate
    /// input or output.
    #[default]
    ExactlyOnce,
}

impl FtModel {
    pub fn option_as_str(value: Option<FtModel>) -> &'static str {
        value.map_or("no", |model| model.as_str())
    }

    pub fn as_str(&self) -> &'static str {
        match self {
            FtModel::AtLeastOnce => "at_least_once",
            FtModel::ExactlyOnce => "exactly_once",
        }
    }
}

pub struct FtModelUnknown;

impl FromStr for FtModel {
    type Err = FtModelUnknown;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_ascii_lowercase().as_str() {
            "exactly_once" => Ok(Self::ExactlyOnce),
            "at_least_once" => Ok(Self::AtLeastOnce),
            _ => Err(FtModelUnknown),
        }
    }
}
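
// Illustrative test sketch (not part of the original source): a round-trip
// check for the string conversions above. The module name is ours.
#[cfg(test)]
mod ft_model_tests {
    use super::FtModel;
    use std::str::FromStr;

    #[test]
    fn from_str_round_trips() {
        for model in [FtModel::AtLeastOnce, FtModel::ExactlyOnce] {
            // `as_str` and `FromStr` are mutually inverse.
            assert_eq!(FtModel::from_str(model.as_str()).ok(), Some(model));
        }
        // Parsing is case-insensitive; unknown names are rejected.
        assert!(FtModel::from_str("Exactly_Once").is_ok());
        assert!(FtModel::from_str("unknown").is_err());
    }
}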

/// Describes an input connector configuration
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct InputEndpointConfig {
    /// The name of the input stream of the circuit that this endpoint is
    /// connected to.
    pub stream: Cow<'static, str>,

    /// Connector configuration.
    #[serde(flatten)]
    pub connector_config: ConnectorConfig,
}

/// Deserialize the `start_after` property of a connector configuration.
/// It requires a non-standard deserialization because we want to accept
/// either a string or an array of strings.
fn deserialize_start_after<'de, D>(deserializer: D) -> Result<Option<Vec<String>>, D::Error>
where
    D: Deserializer<'de>,
{
    let value = Option::<JsonValue>::deserialize(deserializer)?;
    match value {
        Some(JsonValue::String(s)) => Ok(Some(vec![s])),
        Some(JsonValue::Array(arr)) => {
            let vec = arr
                .into_iter()
                .map(|item| {
                    item.as_str()
                        .map(|s| s.to_string())
                        .ok_or_else(|| serde::de::Error::custom("invalid 'start_after' property: expected a string, an array of strings, or null"))
                })
                .collect::<Result<Vec<String>, _>>()?;
            Ok(Some(vec))
        }
        Some(JsonValue::Null) | None => Ok(None),
        _ => Err(serde::de::Error::custom(
            "invalid 'start_after' property: expected a string, an array of strings, or null",
        )),
    }
}
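
// Illustrative test sketch (not part of the original source): exercises the
// flexible `start_after` parsing above, which accepts a single string or an
// array of strings. The wrapper struct is ours.
#[cfg(test)]
mod start_after_tests {
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Wrapper {
        #[serde(deserialize_with = "super::deserialize_start_after", default)]
        start_after: Option<Vec<String>>,
    }

    #[test]
    fn string_or_array() {
        // A bare string becomes a one-element list.
        let w: Wrapper = serde_json::from_str(r#"{"start_after": "a"}"#).unwrap();
        assert_eq!(w.start_after, Some(vec!["a".to_string()]));
        // An array of strings is accepted as-is.
        let w: Wrapper = serde_json::from_str(r#"{"start_after": ["a", "b"]}"#).unwrap();
        assert_eq!(w.start_after, Some(vec!["a".to_string(), "b".to_string()]));
        // Anything else is rejected.
        assert!(serde_json::from_str::<Wrapper>(r#"{"start_after": 5}"#).is_err());
    }
}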

/// A data connector's configuration
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct ConnectorConfig {
    /// Transport endpoint configuration.
    pub transport: TransportConfig,

    /// Parser configuration.
    pub format: Option<FormatConfig>,

    /// Name of the index that the connector is attached to.
    ///
    /// This property is valid for output connectors only.  It is used with data
    /// transports and formats that expect output updates in the form of key/value
    /// pairs, where the key typically represents a unique id associated with the
    /// table or view.
    ///
    /// To support such output formats, an output connector can be attached to an
    /// index created using the SQL CREATE INDEX statement.  An index of a table
    /// or view contains the same updates as the table or view itself, indexed by
    /// one or more key columns.
    ///
    /// See individual connector documentation for details on how they work
    /// with indexes.
    pub index: Option<String>,

    /// Output buffer configuration.
    #[serde(flatten)]
    pub output_buffer_config: OutputBufferConfig,

    /// Maximum batch size, in records.
    ///
    /// This is the maximum number of records to process in one batch through
    /// the circuit.  The time and space cost of processing a batch is
    /// asymptotically superlinear in the size of the batch, but very small
    /// batches are less efficient due to constant factors.
    ///
    /// This should usually be less than `max_queued_records`, to give the
    /// connector a round-trip time to restart and refill the buffer while
    /// batches are being processed.
    ///
    /// Some input adapters might not honor this setting.
    ///
    /// The default is 10,000.
    #[serde(default = "default_max_batch_size")]
    pub max_batch_size: u64,

    /// Backpressure threshold.
    ///
    /// Maximal number of records queued by the endpoint before the endpoint
    /// is paused by the backpressure mechanism.
    ///
    /// For input endpoints, this setting bounds the number of records that have
    /// been received from the input transport but haven't yet been consumed by
    /// the circuit, because the circuit is still busy processing previous
    /// inputs.

    /// For output endpoints, this setting bounds the number of records that have
    /// been produced by the circuit but not yet sent via the output transport endpoint
    /// nor stored in the output buffer (see `enable_output_buffer`).
    ///
    /// Note that this is not a hard bound: there can be a small delay between
    /// the moment the backpressure mechanism is triggered and the moment the
    /// endpoint is paused, during which more data may be queued.
1405    ///
1406    /// The default is 1 million.
1407    #[serde(default = "default_max_queued_records")]
1408    pub max_queued_records: u64,
1409
1410    /// Create connector in paused state.
1411    ///
1412    /// The default is `false`.
1413    #[serde(default)]
1414    pub paused: bool,
1415
1416    /// Arbitrary user-defined text labels associated with the connector.
1417    ///
1418    /// These labels can be used in conjunction with the `start_after` property
1419    /// to control the start order of connectors.
1420    #[serde(default)]
1421    pub labels: Vec<String>,
1422
1423    /// Start the connector after all connectors with specified labels.
1424    ///
1425    /// This property is used to control the start order of connectors.
1426    /// The connector will not start until all connectors with the specified
1427    /// labels have finished processing all inputs.
1428    #[serde(deserialize_with = "deserialize_start_after")]
1429    #[serde(default)]
1430    pub start_after: Option<Vec<String>>,
1431}
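
// A hedged end-to-end sketch of a connector configuration as JSON (transport
// and format bodies are elided or illustrative; the keys mirror the fields
// above, with `OutputBufferConfig` flattened into the top level):
//
//   {
//     "transport": { "name": "kafka_input", "config": { ... } },
//     "format": { "name": "json" },
//     "max_batch_size": 10000,
//     "max_queued_records": 1000000,
//     "paused": false,
//     "labels": ["raw"],
//     "start_after": ["dimension_tables"]
//   }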
1432
1433impl ConnectorConfig {
1434    /// Compare two configs modulo the `paused` field.
1435    ///
1436    /// Used to compare checkpointed and current connector configs.
1437    pub fn equal_modulo_paused(&self, other: &Self) -> bool {
1438        let mut a = self.clone();
1439        let mut b = other.clone();
1440        a.paused = false;
1441        b.paused = false;
1442        a == b
1443    }
1444}
1445
1446#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
1447#[serde(default)]
1448pub struct OutputBufferConfig {
1449    /// Enable output buffering.
1450    ///
1451    /// The output buffering mechanism allows decoupling the rate at which the pipeline
1452    /// pushes changes to the output transport from the rate of input changes.
1453    ///
1454    /// By default, output updates produced by the pipeline are pushed directly to
1455    /// the output transport. Some destinations may prefer to receive updates in
1456    /// fewer, larger batches. For instance, when writing Parquet files, producing
1457    /// one bigger file every few minutes is usually better than creating
1458    /// small files every few milliseconds.
1459    ///
1460    /// To achieve such input/output decoupling, users can enable output buffering by
1461    /// setting the `enable_output_buffer` flag to `true`.  When buffering is enabled, output
1462    /// updates produced by the pipeline are consolidated in an internal buffer and are
1463    /// pushed to the output transport when one of the following conditions is satisfied:
1464    ///
1465    /// * data has been accumulated in the buffer for more than `max_output_buffer_time_millis`
1466    ///   milliseconds.
1467    /// * buffer size exceeds `max_output_buffer_size_records` records.
1468    ///
1469    /// This flag is `false` by default.
1470    // TODO: on-demand output triggered via the API.
1471    pub enable_output_buffer: bool,
1472
1473    /// Maximum time in milliseconds data is kept in the output buffer.
1474    ///
1475    /// By default, data is kept in the buffer indefinitely until one of
1476    /// the other output conditions is satisfied.  When this option is
1477    /// set, data is kept in the buffer for at most
1478    /// `max_output_buffer_time_millis` milliseconds before being flushed.
1479    ///
1480    /// NOTE: this configuration option requires the `enable_output_buffer` flag
1481    /// to be set.
1482    pub max_output_buffer_time_millis: usize,
1483
1484    /// Maximum number of updates to be kept in the output buffer.
1485    ///
1486    /// This parameter bounds the maximal size of the buffer.
1487    /// Note that the size of the buffer is not always equal to the
1488    /// total number of updates output by the pipeline. Updates to the
1489    /// same record can overwrite or cancel previous updates.
1490    ///
1491    /// By default, the buffer can grow indefinitely until one of
1492    /// the other output conditions is satisfied.
1493    ///
1494    /// NOTE: this configuration option requires the `enable_output_buffer` flag
1495    /// to be set.
1496    pub max_output_buffer_size_records: usize,
1497}
1498
1499impl Default for OutputBufferConfig {
1500    fn default() -> Self {
1501        Self {
1502            enable_output_buffer: false,
1503            max_output_buffer_size_records: usize::MAX,
1504            max_output_buffer_time_millis: usize::MAX,
1505        }
1506    }
1507}
1508
1509impl OutputBufferConfig {
1510    pub fn validate(&self) -> Result<(), String> {
1511        if self.enable_output_buffer
1512            && self.max_output_buffer_size_records == Self::default().max_output_buffer_size_records
1513            && self.max_output_buffer_time_millis == Self::default().max_output_buffer_time_millis
1514        {
1515            return Err(
1516                "when the 'enable_output_buffer' flag is set, at least one of the 'max_output_buffer_size_records' and 'max_output_buffer_time_millis' settings must be specified"
1517                    .to_string(),
1518            );
1519        }
1520
1521        Ok(())
1522    }
1523}
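
// Example (an illustrative sketch, not part of the original file): `validate`
// accepts a buffered config as long as at least one of the two bounds is set,
// and rejects one that enables the buffer with both bounds left at their
// defaults.
#[cfg(test)]
mod output_buffer_examples {
    use super::*;

    #[test]
    fn validate_buffered_configs() {
        // A time bound alone makes a valid buffered config.
        let cfg: OutputBufferConfig = serde_json::from_str(
            r#"{"enable_output_buffer": true, "max_output_buffer_time_millis": 60000}"#,
        )
        .unwrap();
        assert!(cfg.validate().is_ok());

        // Enabling the buffer without either bound is rejected.
        let cfg: OutputBufferConfig =
            serde_json::from_str(r#"{"enable_output_buffer": true}"#).unwrap();
        assert!(cfg.validate().is_err());
    }
}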
1524
1525/// Describes an output connector configuration.
1526#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
1527pub struct OutputEndpointConfig {
1528    /// The name of the output stream of the circuit that this endpoint is
1529    /// connected to.
1530    pub stream: Cow<'static, str>,
1531
1532    /// Connector configuration.
1533    #[serde(flatten)]
1534    pub connector_config: ConnectorConfig,
1535}
1536
1537/// Transport-specific endpoint configuration passed to
1538/// `crate::OutputTransport::new_endpoint`
1539/// and `crate::InputTransport::new_endpoint`.
1540#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
1541#[serde(tag = "name", content = "config", rename_all = "snake_case")]
1542pub enum TransportConfig {
1543    FileInput(FileInputConfig),
1544    FileOutput(FileOutputConfig),
1545    NatsInput(NatsInputConfig),
1546    KafkaInput(KafkaInputConfig),
1547    KafkaOutput(KafkaOutputConfig),
1548    PubSubInput(PubSubInputConfig),
1549    UrlInput(UrlInputConfig),
1550    S3Input(S3InputConfig),
1551    DeltaTableInput(DeltaTableReaderConfig),
1552    DeltaTableOutput(DeltaTableWriterConfig),
1553    RedisOutput(RedisOutputConfig),
1554    // Boxed to prevent Rust from complaining about the large size difference between enum variants.
1555    IcebergInput(Box<IcebergReaderConfig>),
1556    PostgresInput(PostgresReaderConfig),
1557    PostgresOutput(PostgresWriterConfig),
1558    Datagen(DatagenInputConfig),
1559    Nexmark(NexmarkInputConfig),
1560    /// Direct HTTP input: cannot be instantiated through the API.
1561    HttpInput(HttpInputConfig),
1562    /// Direct HTTP output: cannot be instantiated through the API.
1563    HttpOutput,
1564    /// Ad hoc input: cannot be instantiated through the API.
1565    AdHocInput(AdHocInputConfig),
1566    ClockInput(ClockConfig),
1567}
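
// Example (an illustrative sketch, not part of the original file): with the
// adjacently tagged representation above, each transport serializes as an
// object with a "name" tag and, for variants that carry a payload, a "config"
// body. The payload-less `HttpOutput` serializes to the tag alone.
#[cfg(test)]
mod transport_tag_examples {
    use super::*;

    #[test]
    fn adjacently_tagged() {
        let json = serde_json::to_string(&TransportConfig::HttpOutput).unwrap();
        assert_eq!(json, r#"{"name":"http_output"}"#);
    }
}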
1568
1569impl TransportConfig {
1570    pub fn name(&self) -> String {
1571        match self {
1572            TransportConfig::FileInput(_) => "file_input".to_string(),
1573            TransportConfig::FileOutput(_) => "file_output".to_string(),
1574            TransportConfig::NatsInput(_) => "nats_input".to_string(),
1575            TransportConfig::KafkaInput(_) => "kafka_input".to_string(),
1576            TransportConfig::KafkaOutput(_) => "kafka_output".to_string(),
1577            TransportConfig::PubSubInput(_) => "pub_sub_input".to_string(),
1578            TransportConfig::UrlInput(_) => "url_input".to_string(),
1579            TransportConfig::S3Input(_) => "s3_input".to_string(),
1580            TransportConfig::DeltaTableInput(_) => "delta_table_input".to_string(),
1581            TransportConfig::DeltaTableOutput(_) => "delta_table_output".to_string(),
1582            TransportConfig::IcebergInput(_) => "iceberg_input".to_string(),
1583            TransportConfig::PostgresInput(_) => "postgres_input".to_string(),
1584            TransportConfig::PostgresOutput(_) => "postgres_output".to_string(),
1585            TransportConfig::Datagen(_) => "datagen".to_string(),
1586            TransportConfig::Nexmark(_) => "nexmark".to_string(),
1587            TransportConfig::HttpInput(_) => "http_input".to_string(),
1588            TransportConfig::HttpOutput => "http_output".to_string(),
1589            TransportConfig::AdHocInput(_) => "adhoc_input".to_string(),
1590            TransportConfig::RedisOutput(_) => "redis_output".to_string(),
1591            TransportConfig::ClockInput(_) => "clock".to_string(),
1592        }
1593    }
1594
1595    /// Returns true if the connector is transient, i.e., it is created and destroyed
1596    /// at runtime on demand, rather than being configured as part of the pipeline.
1597    pub fn is_transient(&self) -> bool {
1598        matches!(
1599            self,
1600            TransportConfig::AdHocInput(_)
1601                | TransportConfig::HttpInput(_)
1602                | TransportConfig::HttpOutput
1603                | TransportConfig::ClockInput(_)
1604        )
1605    }
1606}
1607
1608/// Data format specification used to parse raw data received from the
1609/// endpoint or to encode data sent to the endpoint.
1610#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize, ToSchema)]
1611pub struct FormatConfig {
1612    /// Format name, e.g., "csv", "json", or "bincode".
1613    pub name: Cow<'static, str>,
1614
1615    /// Format-specific parser or encoder configuration.
1616    #[serde(default)]
1617    #[schema(value_type = Object)]
1618    pub config: JsonValue,
1619}
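
// Example (an illustrative sketch, not part of the original file): `config`
// defaults to JSON `null` when omitted, so a bare format name is already a
// complete `FormatConfig`.
#[cfg(test)]
mod format_config_examples {
    use super::*;

    #[test]
    fn name_only_format() {
        let f: FormatConfig = serde_json::from_str(r#"{"name": "csv"}"#).unwrap();
        assert_eq!(f.name, "csv");
        assert_eq!(f.config, JsonValue::Null);
    }
}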
1620
1621#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, ToSchema)]
1622#[serde(default)]
1623pub struct ResourceConfig {
1624    /// The minimum number of CPU cores to reserve
1625    /// for an instance of this pipeline.
1626    #[serde(deserialize_with = "crate::serde_via_value::deserialize")]
1627    pub cpu_cores_min: Option<f64>,
1628
1629    /// The maximum number of CPU cores to reserve
1630    /// for an instance of this pipeline.
1631    #[serde(deserialize_with = "crate::serde_via_value::deserialize")]
1632    pub cpu_cores_max: Option<f64>,
1633
1634    /// The minimum memory in megabytes to reserve
1635    /// for an instance of this pipeline.
1636    pub memory_mb_min: Option<u64>,
1637
1638    /// The maximum memory in megabytes to reserve
1639    /// for an instance of this pipeline.
1640    pub memory_mb_max: Option<u64>,
1641
1642    /// The total storage in megabytes to reserve
1643    /// for an instance of this pipeline.
1644    pub storage_mb_max: Option<u64>,
1645
1646    /// Storage class to use for an instance of this pipeline.
1647    /// The class determines storage performance such as IOPS and throughput.
1648    pub storage_class: Option<String>,
1649
1650    /// Kubernetes service account name to use for an instance of this pipeline.
1651    /// The account determines permissions and access controls.
1652    pub service_account_name: Option<String>,
1653
1654    /// Kubernetes namespace to use for an instance of this pipeline.
1655    /// The namespace determines the scope of names for resources created
1656    /// for the pipeline.
1657    /// If not set, the pipeline will be deployed in the same namespace
1658    /// as the control-plane.
1659    pub namespace: Option<String>,
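
    // A hedged sketch of a partial `ResourceConfig` as JSON (values are
    // illustrative; fields left out stay `None` thanks to the struct-level
    // `#[serde(default)]`):
    //
    //   { "cpu_cores_max": 4.0, "memory_mb_max": 4096, "storage_mb_max": 10240 }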
1660}