/// ClickHouse configuration options. Detailed description for each set of options
/// is available in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server_settings/settings/>).
///
/// Any options not listed here are not supported.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClickhouseConfig {
/// Logging level for the ClickHouse cluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR.
#[prost(enumeration = "clickhouse_config::LogLevel", tag = "1")]
pub log_level: i32,
/// Settings for the MergeTree engine.
/// See description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server_settings/settings/#merge_tree>).
#[prost(message, optional, tag = "2")]
pub merge_tree: ::core::option::Option<clickhouse_config::MergeTree>,
/// Compression settings for the ClickHouse cluster.
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server_settings/settings/#compression>).
#[prost(message, repeated, tag = "3")]
pub compression: ::prost::alloc::vec::Vec<clickhouse_config::Compression>,
/// Configuration of external dictionaries to be used by the ClickHouse cluster.
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts/>).
#[prost(message, repeated, tag = "4")]
pub dictionaries: ::prost::alloc::vec::Vec<clickhouse_config::ExternalDictionary>,
/// Settings for thinning Graphite data.
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server_settings/settings/#server_settings-graphite_rollup>).
#[prost(message, repeated, tag = "5")]
pub graphite_rollup: ::prost::alloc::vec::Vec<clickhouse_config::GraphiteRollup>,
/// Cluster-wide Kafka integration settings.
/// NOTE(review): semantics inferred from the field name and nested `Kafka` message — confirm against the service API docs.
#[prost(message, optional, tag = "35")]
pub kafka: ::core::option::Option<clickhouse_config::Kafka>,
/// Per-topic Kafka settings.
/// NOTE(review): presumably these override the cluster-wide `kafka` settings for the named topics — confirm.
#[prost(message, repeated, tag = "36")]
pub kafka_topics: ::prost::alloc::vec::Vec<clickhouse_config::KafkaTopic>,
/// RabbitMQ integration settings.
/// NOTE(review): semantics inferred from the field name — confirm against the service API docs.
#[prost(message, optional, tag = "37")]
pub rabbitmq: ::core::option::Option<clickhouse_config::Rabbitmq>,
/// Maximum number of inbound connections.
#[prost(message, optional, tag = "6")]
pub max_connections: ::core::option::Option<i64>,
/// Maximum number of simultaneously processed requests.
#[prost(message, optional, tag = "7")]
pub max_concurrent_queries: ::core::option::Option<i64>,
/// Number of milliseconds that ClickHouse waits for incoming requests before closing the connection.
#[prost(message, optional, tag = "8")]
pub keep_alive_timeout: ::core::option::Option<i64>,
/// Cache size (in bytes) for uncompressed data used by MergeTree tables.
#[prost(message, optional, tag = "9")]
pub uncompressed_cache_size: ::core::option::Option<i64>,
/// Approximate size (in bytes) of the cache of "marks" used by MergeTree tables.
#[prost(message, optional, tag = "10")]
pub mark_cache_size: ::core::option::Option<i64>,
/// Maximum size of the table that can be deleted using a DROP query.
#[prost(message, optional, tag = "11")]
pub max_table_size_to_drop: ::core::option::Option<i64>,
/// Maximum size of the partition that can be deleted using a DROP query.
#[prost(message, optional, tag = "13")]
pub max_partition_size_to_drop: ::core::option::Option<i64>,
/// The setting is deprecated and has no effect.
#[deprecated]
#[prost(message, optional, tag = "12")]
pub builtin_dictionaries_reload_interval: ::core::option::Option<i64>,
/// The server's time zone to be used in DateTime fields conversions. Specified as an IANA identifier.
#[prost(string, tag = "14")]
pub timezone: ::prost::alloc::string::String,
/// Enable or disable geobase.
#[prost(message, optional, tag = "66")]
pub geobase_enabled: ::core::option::Option<bool>,
/// Address of the archive with the user geobase in Object Storage.
#[prost(string, tag = "15")]
pub geobase_uri: ::prost::alloc::string::String,
/// The maximum size that query_log can grow to before old data will be removed. If set to 0, automatic removal of
/// query_log data based on size is disabled.
#[prost(message, optional, tag = "16")]
pub query_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that query_log records will be retained before removal. If set to 0, automatic removal of
/// query_log data based on time is disabled.
#[prost(message, optional, tag = "17")]
pub query_log_retention_time: ::core::option::Option<i64>,
/// Whether query_thread_log system table is enabled.
#[prost(message, optional, tag = "18")]
pub query_thread_log_enabled: ::core::option::Option<bool>,
/// The maximum size that query_thread_log can grow to before old data will be removed. If set to 0, automatic removal of
/// query_thread_log data based on size is disabled.
#[prost(message, optional, tag = "19")]
pub query_thread_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that query_thread_log records will be retained before removal. If set to 0, automatic removal of
/// query_thread_log data based on time is disabled.
#[prost(message, optional, tag = "20")]
pub query_thread_log_retention_time: ::core::option::Option<i64>,
/// The maximum size that part_log can grow to before old data will be removed. If set to 0, automatic removal of
/// part_log data based on size is disabled.
#[prost(message, optional, tag = "21")]
pub part_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that part_log records will be retained before removal. If set to 0, automatic removal of
/// part_log data based on time is disabled.
#[prost(message, optional, tag = "22")]
pub part_log_retention_time: ::core::option::Option<i64>,
/// Whether metric_log system table is enabled.
#[prost(message, optional, tag = "23")]
pub metric_log_enabled: ::core::option::Option<bool>,
/// The maximum size that metric_log can grow to before old data will be removed. If set to 0, automatic removal of
/// metric_log data based on size is disabled.
#[prost(message, optional, tag = "24")]
pub metric_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that metric_log records will be retained before removal. If set to 0, automatic removal of
/// metric_log data based on time is disabled.
#[prost(message, optional, tag = "25")]
pub metric_log_retention_time: ::core::option::Option<i64>,
/// Whether trace_log system table is enabled.
#[prost(message, optional, tag = "26")]
pub trace_log_enabled: ::core::option::Option<bool>,
/// The maximum size that trace_log can grow to before old data will be removed. If set to 0, automatic removal of
/// trace_log data based on size is disabled.
#[prost(message, optional, tag = "27")]
pub trace_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that trace_log records will be retained before removal. If set to 0, automatic removal of
/// trace_log data based on time is disabled.
#[prost(message, optional, tag = "28")]
pub trace_log_retention_time: ::core::option::Option<i64>,
/// Whether text_log system table is enabled.
#[prost(message, optional, tag = "29")]
pub text_log_enabled: ::core::option::Option<bool>,
/// The maximum size that text_log can grow to before old data will be removed. If set to 0, automatic removal of
/// text_log data based on size is disabled.
#[prost(message, optional, tag = "30")]
pub text_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that text_log records will be retained before removal. If set to 0, automatic removal of
/// text_log data based on time is disabled.
#[prost(message, optional, tag = "31")]
pub text_log_retention_time: ::core::option::Option<i64>,
/// Logging level for text_log system table. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR.
#[prost(enumeration = "clickhouse_config::LogLevel", tag = "32")]
pub text_log_level: i32,
/// Enable or disable opentelemetry_span_log system table. Default value: false.
#[prost(message, optional, tag = "42")]
pub opentelemetry_span_log_enabled: ::core::option::Option<bool>,
/// The maximum size that opentelemetry_span_log can grow to before old data will be removed. If set to 0 (default),
/// automatic removal of opentelemetry_span_log data based on size is disabled.
#[prost(message, optional, tag = "55")]
pub opentelemetry_span_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that opentelemetry_span_log records will be retained before removal. If set to 0,
/// automatic removal of opentelemetry_span_log data based on time is disabled.
#[prost(message, optional, tag = "56")]
pub opentelemetry_span_log_retention_time: ::core::option::Option<i64>,
/// Enable or disable query_views_log system table. Default value: false.
#[prost(message, optional, tag = "49")]
pub query_views_log_enabled: ::core::option::Option<bool>,
/// The maximum size that query_views_log can grow to before old data will be removed. If set to 0 (default),
/// automatic removal of query_views_log data based on size is disabled.
#[prost(message, optional, tag = "50")]
pub query_views_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that query_views_log records will be retained before removal. If set to 0,
/// automatic removal of query_views_log data based on time is disabled.
#[prost(message, optional, tag = "51")]
pub query_views_log_retention_time: ::core::option::Option<i64>,
/// Enable or disable asynchronous_metric_log system table. Default value: false.
#[prost(message, optional, tag = "52")]
pub asynchronous_metric_log_enabled: ::core::option::Option<bool>,
/// The maximum size that asynchronous_metric_log can grow to before old data will be removed. If set to 0 (default),
/// automatic removal of asynchronous_metric_log data based on size is disabled.
#[prost(message, optional, tag = "53")]
pub asynchronous_metric_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that asynchronous_metric_log records will be retained before removal. If set to 0,
/// automatic removal of asynchronous_metric_log data based on time is disabled.
#[prost(message, optional, tag = "54")]
pub asynchronous_metric_log_retention_time: ::core::option::Option<i64>,
/// Enable or disable session_log system table. Default value: false.
#[prost(message, optional, tag = "57")]
pub session_log_enabled: ::core::option::Option<bool>,
/// The maximum size that session_log can grow to before old data will be removed. If set to 0 (default),
/// automatic removal of session_log data based on size is disabled.
#[prost(message, optional, tag = "58")]
pub session_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that session_log records will be retained before removal. If set to 0,
/// automatic removal of session_log data based on time is disabled.
#[prost(message, optional, tag = "59")]
pub session_log_retention_time: ::core::option::Option<i64>,
/// Enable or disable zookeeper_log system table. Default value: false.
#[prost(message, optional, tag = "60")]
pub zookeeper_log_enabled: ::core::option::Option<bool>,
/// The maximum size that zookeeper_log can grow to before old data will be removed. If set to 0 (default),
/// automatic removal of zookeeper_log data based on size is disabled.
#[prost(message, optional, tag = "61")]
pub zookeeper_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that zookeeper_log records will be retained before removal. If set to 0,
/// automatic removal of zookeeper_log data based on time is disabled.
#[prost(message, optional, tag = "62")]
pub zookeeper_log_retention_time: ::core::option::Option<i64>,
/// Enable or disable asynchronous_insert_log system table. Default value: false.
/// Minimal required ClickHouse version: 22.10.
#[prost(message, optional, tag = "63")]
pub asynchronous_insert_log_enabled: ::core::option::Option<bool>,
/// The maximum size that asynchronous_insert_log can grow to before old data will be removed. If set to 0 (default),
/// automatic removal of asynchronous_insert_log data based on size is disabled.
#[prost(message, optional, tag = "64")]
pub asynchronous_insert_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that asynchronous_insert_log records will be retained before removal. If set to 0,
/// automatic removal of asynchronous_insert_log data based on time is disabled.
#[prost(message, optional, tag = "65")]
pub asynchronous_insert_log_retention_time: ::core::option::Option<i64>,
/// Enable or disable processors_profile_log system table.
#[prost(message, optional, tag = "71")]
pub processors_profile_log_enabled: ::core::option::Option<bool>,
/// The maximum size that processors_profile_log can grow to before old data will be removed.
/// If set to 0 (default), automatic removal of processors_profile_log data based on size is disabled.
#[prost(message, optional, tag = "72")]
pub processors_profile_log_retention_size: ::core::option::Option<i64>,
/// The maximum time that processors_profile_log records will be retained before removal.
/// If set to 0, automatic removal of processors_profile_log data based on time is disabled.
#[prost(message, optional, tag = "73")]
pub processors_profile_log_retention_time: ::core::option::Option<i64>,
/// Size of the pool for background merges and mutations.
/// NOTE(review): presumably maps to the ClickHouse `background_pool_size` server setting
/// (the doc of `background_merges_mutations_concurrency_ratio` below relates the two) — confirm.
#[prost(message, optional, tag = "33")]
pub background_pool_size: ::core::option::Option<i64>,
/// Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example, if the ratio equals to 2 and background_pool_size is set to 16 then ClickHouse can execute 32 background merges concurrently. This is possible, because background operations could be suspended and postponed. This is needed to give small merges more execution priority. You can only increase this ratio at runtime. To lower it you have to restart the server. The same as for background_pool_size setting background_merges_mutations_concurrency_ratio could be applied from the default profile for backward compatibility.
/// Default: 2
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_merges_mutations_concurrency_ratio>)
#[prost(message, optional, tag = "48")]
pub background_merges_mutations_concurrency_ratio: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named ClickHouse server setting
/// `background_schedule_pool_size` — confirm.
#[prost(message, optional, tag = "34")]
pub background_schedule_pool_size: ::core::option::Option<i64>,
/// Sets the number of threads performing background fetches for tables with **ReplicatedMergeTree** engines. Default value: 8.
///
/// More info see in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#background_fetches_pool_size>).
#[prost(message, optional, tag = "38")]
pub background_fetches_pool_size: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named ClickHouse server setting
/// `background_move_pool_size` — confirm.
#[prost(message, optional, tag = "39")]
pub background_move_pool_size: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named ClickHouse server setting
/// `background_distributed_schedule_pool_size` — confirm.
#[prost(message, optional, tag = "40")]
pub background_distributed_schedule_pool_size: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named ClickHouse server setting
/// `background_buffer_flush_schedule_pool_size` — confirm.
#[prost(message, optional, tag = "41")]
pub background_buffer_flush_schedule_pool_size: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named ClickHouse server setting
/// `background_message_broker_schedule_pool_size` — confirm.
#[prost(message, optional, tag = "46")]
pub background_message_broker_schedule_pool_size: ::core::option::Option<i64>,
/// The maximum number of threads that will be used for performing a variety of operations (mostly garbage collection) for *MergeTree-engine tables in a background.
/// Default: 8
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#background_common_pool_size>)
#[prost(message, optional, tag = "47")]
pub background_common_pool_size: ::core::option::Option<i64>,
/// The default database.
///
/// To get a list of cluster databases, see [Yandex Managed ClickHouse documentation](/docs/managed-clickhouse/operations/databases#list-db).
#[prost(message, optional, tag = "43")]
pub default_database: ::core::option::Option<::prost::alloc::string::String>,
/// Sets the memory size (in bytes) for a stack trace at every peak allocation step. Default value: **4194304**.
///
/// More info see in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#total-memory-profiler-step>).
#[prost(message, optional, tag = "44")]
pub total_memory_profiler_step: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the ClickHouse server setting
/// `total_memory_tracker_sample_probability` (expected range [0, 1]) — confirm.
#[prost(message, optional, tag = "45")]
pub total_memory_tracker_sample_probability: ::core::option::Option<f64>,
/// Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs, system.query_log, system.text_log, system.processes tables, and in logs sent to the client. That allows preventing sensitive data leakage from SQL queries (like names, emails, personal identifiers or credit card numbers) to logs.
/// Change of these settings is applied with ClickHouse restart
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#query-masking-rules>)
#[prost(message, repeated, tag = "67")]
pub query_masking_rules: ::prost::alloc::vec::Vec<
clickhouse_config::QueryMaskingRule,
>,
/// Lazy loading of dictionaries.
/// Default: true
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#dictionaries_lazy_load>)
#[prost(message, optional, tag = "68")]
pub dictionaries_lazy_load: ::core::option::Option<bool>,
/// [Query cache](<https://clickhouse.com/docs/en/operations/query-cache>) configuration.
/// Min version: 23.5
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#query_cache>)
#[prost(message, optional, tag = "69")]
pub query_cache: ::core::option::Option<clickhouse_config::QueryCache>,
/// JDBC bridge for queries to external databases.
/// <https://clickhouse.com/docs/en/integrations/jdbc/jdbc-with-clickhouse>
#[prost(message, optional, tag = "70")]
pub jdbc_bridge: ::core::option::Option<clickhouse_config::JdbcBridge>,
}
/// Nested message and enum types in `ClickhouseConfig`.
pub mod clickhouse_config {
/// Options specific to the MergeTree table engine.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MergeTree {
/// Number of blocks of hashes to keep in ZooKeeper.
#[prost(message, optional, tag = "1")]
pub replicated_deduplication_window: ::core::option::Option<i64>,
/// Period of time to keep blocks of hashes for.
#[prost(message, optional, tag = "2")]
pub replicated_deduplication_window_seconds: ::core::option::Option<i64>,
/// If table contains at least that many active parts in single partition, artificially slow down insert into table.
#[prost(message, optional, tag = "3")]
pub parts_to_delay_insert: ::core::option::Option<i64>,
/// If more than this number active parts in single partition, throw 'Too many parts ...' exception.
#[prost(message, optional, tag = "4")]
pub parts_to_throw_insert: ::core::option::Option<i64>,
/// NOTE(review): presumably like `parts_to_delay_insert`, but counting inactive (outdated) parts — confirm.
#[prost(message, optional, tag = "9")]
pub inactive_parts_to_delay_insert: ::core::option::Option<i64>,
/// NOTE(review): presumably like `parts_to_throw_insert`, but counting inactive (outdated) parts — confirm.
#[prost(message, optional, tag = "10")]
pub inactive_parts_to_throw_insert: ::core::option::Option<i64>,
/// How many tasks of merging and mutating parts are allowed simultaneously in ReplicatedMergeTree queue.
#[prost(message, optional, tag = "5")]
pub max_replicated_merges_in_queue: ::core::option::Option<i64>,
/// If there is less than specified number of free entries in background pool (or replicated queue), start to lower
/// maximum size of merge to process.
#[prost(message, optional, tag = "6")]
pub number_of_free_entries_in_pool_to_lower_max_size_of_merge: ::core::option::Option<
i64,
>,
/// Maximum in total size of parts to merge, when there are minimum free threads in background pool (or entries
/// in replication queue).
#[prost(message, optional, tag = "7")]
pub max_bytes_to_merge_at_min_space_in_pool: ::core::option::Option<i64>,
/// NOTE(review): presumably the counterpart of the field above — the maximum total size of parts
/// to merge when the background pool has maximum free resources — confirm.
#[prost(message, optional, tag = "8")]
pub max_bytes_to_merge_at_max_space_in_pool: ::core::option::Option<i64>,
/// Minimum number of bytes in a data part that can be stored in **Wide** format.
///
/// More info see in [ClickHouse documentation](<https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#min_bytes_for_wide_part>).
#[prost(message, optional, tag = "11")]
pub min_bytes_for_wide_part: ::core::option::Option<i64>,
/// Minimum number of rows in a data part that can be stored in **Wide** format.
///
/// More info see in [ClickHouse documentation](<https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#min_bytes_for_wide_part>).
#[prost(message, optional, tag = "12")]
pub min_rows_for_wide_part: ::core::option::Option<i64>,
/// Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables.
///
/// More info see in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/settings/settings/#ttl_only_drop_parts>).
#[prost(message, optional, tag = "13")]
pub ttl_only_drop_parts: ::core::option::Option<bool>,
/// NOTE(review): presumably enables zero-copy replication for parts stored on remote filesystems — confirm.
#[prost(message, optional, tag = "14")]
pub allow_remote_fs_zero_copy_replication: ::core::option::Option<bool>,
/// NOTE(review): presumably maps to the same-named MergeTree setting — confirm.
#[prost(message, optional, tag = "15")]
pub merge_with_ttl_timeout: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named MergeTree setting — confirm.
#[prost(message, optional, tag = "16")]
pub merge_with_recompression_ttl_timeout: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named MergeTree setting — confirm.
#[prost(message, optional, tag = "17")]
pub max_parts_in_total: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named MergeTree setting — confirm.
#[prost(message, optional, tag = "18")]
pub max_number_of_merges_with_ttl_in_pool: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named MergeTree setting
/// (see also `max_cleanup_delay_period` below) — confirm.
#[prost(message, optional, tag = "19")]
pub cleanup_delay_period: ::core::option::Option<i64>,
/// NOTE(review): presumably maps to the same-named MergeTree setting — confirm.
#[prost(message, optional, tag = "20")]
pub number_of_free_entries_in_pool_to_execute_mutation: ::core::option::Option<
i64,
>,
/// The 'too many parts' check according to 'parts_to_delay_insert' and 'parts_to_throw_insert' will be active only if the average part size (in the relevant partition) is not larger than the specified threshold. If it is larger than the specified threshold, the INSERTs will be neither delayed or rejected. This allows to have hundreds of terabytes in a single table on a single server if the parts are successfully merged to larger parts. This does not affect the thresholds on inactive parts or total parts.
/// Default: 1 GiB
/// Min version: 22.10
/// See in-depth description in [ClickHouse GitHub](<https://github.com/ClickHouse/ClickHouse/blob/f9558345e886876b9132d9c018e357f7fa9b22a3/src/Storages/MergeTree/MergeTreeSettings.h#L80>)
#[prost(message, optional, tag = "21")]
pub max_avg_part_size_for_too_many_parts: ::core::option::Option<i64>,
/// Merge parts if every part in the range is older than the value of min_age_to_force_merge_seconds.
/// Default: 0 - disabled
/// Min_version: 22.10
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds>)
#[prost(message, optional, tag = "22")]
pub min_age_to_force_merge_seconds: ::core::option::Option<i64>,
/// Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.
/// Default: false
/// Min_version: 22.11
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds>)
#[prost(message, optional, tag = "23")]
pub min_age_to_force_merge_on_partition_only: ::core::option::Option<bool>,
/// Sleep time for merge selecting when no part is selected. A lower setting triggers selecting tasks in background_schedule_pool frequently, which results in a large number of requests to ClickHouse Keeper in large-scale clusters.
/// Default: 5000
/// Min_version: 21.10
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/settings/settings#merge_selecting_sleep_ms>)
#[prost(message, optional, tag = "24")]
pub merge_selecting_sleep_ms: ::core::option::Option<i64>,
/// The number of rows that are read from the merged parts into memory.
/// Default: 8192
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/settings/settings#merge_max_block_size>)
#[prost(message, optional, tag = "25")]
pub merge_max_block_size: ::core::option::Option<i64>,
/// Enables the check at table creation, that the data type of a column for sampling or sampling expression is correct. The data type must be one of unsigned [integer types](<https://clickhouse.com/docs/en/sql-reference/data-types/int-uint>): UInt8, UInt16, UInt32, UInt64.
/// Default: true
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#check_sample_column_is_correct>)
#[prost(message, optional, tag = "26")]
pub check_sample_column_is_correct: ::core::option::Option<bool>,
/// Maximum sleep time for merge selecting, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters.
/// Default: 60000
/// Min_version: 23.6
/// See in-depth description in [ClickHouse GitHub](<https://github.com/ClickHouse/ClickHouse/blob/4add9db84859bff7410cf934a3904b0414e36e51/src/Storages/MergeTree/MergeTreeSettings.h#L71>)
#[prost(message, optional, tag = "27")]
pub max_merge_selecting_sleep_ms: ::core::option::Option<i64>,
/// Maximum period to clean old queue logs, blocks hashes and parts.
/// Default: 300
/// Min_version: 23.6
/// See in-depth description in [ClickHouse GitHub](<https://github.com/ClickHouse/ClickHouse/blob/4add9db84859bff7410cf934a3904b0414e36e51/src/Storages/MergeTree/MergeTreeSettings.h#L142>)
#[prost(message, optional, tag = "28")]
pub max_cleanup_delay_period: ::core::option::Option<i64>,
/// Determines the behavior of background merges for MergeTree tables with projections.
/// <https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#deduplicate_merge_projection_mode>
#[prost(enumeration = "merge_tree::DeduplicateMergeProjectionMode", tag = "29")]
pub deduplicate_merge_projection_mode: i32,
/// Determines the behavior of lightweight deletes for MergeTree tables with projections.
#[prost(
enumeration = "merge_tree::LightweightMutationProjectionMode",
tag = "30"
)]
pub lightweight_mutation_projection_mode: i32,
/// Only recalculate ttl info when MATERIALIZE TTL.
#[prost(message, optional, tag = "31")]
pub materialize_ttl_recalculate_only: ::core::option::Option<bool>,
}
/// Nested message and enum types in `MergeTree`.
pub mod merge_tree {
    /// Behavior of background merges for MergeTree tables with projections
    /// (see the `deduplicate_merge_projection_mode` field on `MergeTree`).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum DeduplicateMergeProjectionMode {
        Unspecified = 0,
        Ignore = 1,
        Throw = 2,
        Drop = 3,
        Rebuild = 4,
    }
    impl DeduplicateMergeProjectionMode {
        /// Returns the ProtoBuf identifier of this variant.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                Self::Unspecified => "DEDUPLICATE_MERGE_PROJECTION_MODE_UNSPECIFIED",
                Self::Ignore => "DEDUPLICATE_MERGE_PROJECTION_MODE_IGNORE",
                Self::Throw => "DEDUPLICATE_MERGE_PROJECTION_MODE_THROW",
                Self::Drop => "DEDUPLICATE_MERGE_PROJECTION_MODE_DROP",
                Self::Rebuild => "DEDUPLICATE_MERGE_PROJECTION_MODE_REBUILD",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "DEDUPLICATE_MERGE_PROJECTION_MODE_UNSPECIFIED" => Some(Self::Unspecified),
                "DEDUPLICATE_MERGE_PROJECTION_MODE_IGNORE" => Some(Self::Ignore),
                "DEDUPLICATE_MERGE_PROJECTION_MODE_THROW" => Some(Self::Throw),
                "DEDUPLICATE_MERGE_PROJECTION_MODE_DROP" => Some(Self::Drop),
                "DEDUPLICATE_MERGE_PROJECTION_MODE_REBUILD" => Some(Self::Rebuild),
                _ => None,
            }
        }
    }
    /// Behavior of lightweight deletes for MergeTree tables with projections
    /// (see the `lightweight_mutation_projection_mode` field on `MergeTree`).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum LightweightMutationProjectionMode {
        Unspecified = 0,
        Throw = 1,
        Drop = 2,
        Rebuild = 3,
    }
    impl LightweightMutationProjectionMode {
        /// Returns the ProtoBuf identifier of this variant.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                Self::Unspecified => "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_UNSPECIFIED",
                Self::Throw => "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_THROW",
                Self::Drop => "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_DROP",
                Self::Rebuild => "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_REBUILD",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_UNSPECIFIED" => Some(Self::Unspecified),
                "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_THROW" => Some(Self::Throw),
                "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_DROP" => Some(Self::Drop),
                "LIGHTWEIGHT_MUTATION_PROJECTION_MODE_REBUILD" => Some(Self::Rebuild),
                _ => None,
            }
        }
    }
}
/// Kafka integration settings.
/// NOTE(review): field names mirror librdkafka configuration parameters
/// (e.g. `security.protocol`, `sasl.mechanism`) — confirm semantics against librdkafka docs.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Kafka {
/// Security protocol used for the broker connection (see `kafka::SecurityProtocol`).
#[prost(enumeration = "kafka::SecurityProtocol", tag = "1")]
pub security_protocol: i32,
/// SASL authentication mechanism (see `kafka::SaslMechanism`).
#[prost(enumeration = "kafka::SaslMechanism", tag = "2")]
pub sasl_mechanism: i32,
/// Username for SASL authentication.
#[prost(string, tag = "3")]
pub sasl_username: ::prost::alloc::string::String,
/// Password for SASL authentication.
#[prost(string, tag = "4")]
pub sasl_password: ::prost::alloc::string::String,
/// NOTE(review): presumably toggles broker TLS certificate verification — confirm.
#[prost(message, optional, tag = "5")]
pub enable_ssl_certificate_verification: ::core::option::Option<bool>,
/// NOTE(review): presumably the librdkafka `max.poll.interval.ms` setting, in milliseconds — confirm.
#[prost(message, optional, tag = "6")]
pub max_poll_interval_ms: ::core::option::Option<i64>,
/// NOTE(review): presumably the librdkafka `session.timeout.ms` setting, in milliseconds — confirm.
#[prost(message, optional, tag = "7")]
pub session_timeout_ms: ::core::option::Option<i64>,
/// Debug context to enable (see `kafka::Debug`).
#[prost(enumeration = "kafka::Debug", tag = "8")]
pub debug: i32,
/// NOTE(review): presumably the consumer `auto.offset.reset` policy — confirm.
#[prost(enumeration = "kafka::AutoOffsetReset", tag = "9")]
pub auto_offset_reset: i32,
}
/// Nested message and enum types in `Kafka`.
pub mod kafka {
/// Transport security used when connecting to Kafka brokers.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SecurityProtocol {
    Unspecified = 0,
    Plaintext = 1,
    Ssl = 2,
    SaslPlaintext = 3,
    SaslSsl = 4,
}
impl SecurityProtocol {
    /// Returns the ProtoBuf identifier of this variant.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "SECURITY_PROTOCOL_UNSPECIFIED",
            Self::Plaintext => "SECURITY_PROTOCOL_PLAINTEXT",
            Self::Ssl => "SECURITY_PROTOCOL_SSL",
            Self::SaslPlaintext => "SECURITY_PROTOCOL_SASL_PLAINTEXT",
            Self::SaslSsl => "SECURITY_PROTOCOL_SASL_SSL",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "SECURITY_PROTOCOL_UNSPECIFIED" => Some(Self::Unspecified),
            "SECURITY_PROTOCOL_PLAINTEXT" => Some(Self::Plaintext),
            "SECURITY_PROTOCOL_SSL" => Some(Self::Ssl),
            "SECURITY_PROTOCOL_SASL_PLAINTEXT" => Some(Self::SaslPlaintext),
            "SECURITY_PROTOCOL_SASL_SSL" => Some(Self::SaslSsl),
            _ => None,
        }
    }
}
/// SASL mechanism used to authenticate against Kafka brokers.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SaslMechanism {
    Unspecified = 0,
    Gssapi = 1,
    Plain = 2,
    ScramSha256 = 3,
    ScramSha512 = 4,
}
impl SaslMechanism {
    /// Returns the ProtoBuf identifier of this variant.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "SASL_MECHANISM_UNSPECIFIED",
            Self::Gssapi => "SASL_MECHANISM_GSSAPI",
            Self::Plain => "SASL_MECHANISM_PLAIN",
            Self::ScramSha256 => "SASL_MECHANISM_SCRAM_SHA_256",
            Self::ScramSha512 => "SASL_MECHANISM_SCRAM_SHA_512",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "SASL_MECHANISM_UNSPECIFIED" => Some(Self::Unspecified),
            "SASL_MECHANISM_GSSAPI" => Some(Self::Gssapi),
            "SASL_MECHANISM_PLAIN" => Some(Self::Plain),
            "SASL_MECHANISM_SCRAM_SHA_256" => Some(Self::ScramSha256),
            "SASL_MECHANISM_SCRAM_SHA_512" => Some(Self::ScramSha512),
            _ => None,
        }
    }
}
/// Debug context to enable for the Kafka client.
/// NOTE(review): the variant set appears to mirror librdkafka's `debug`
/// configuration contexts — confirm against the proto definition.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum Debug {
/// Debug context is not specified.
Unspecified = 0,
Generic = 1,
Broker = 2,
Topic = 3,
Metadata = 4,
Feature = 5,
Queue = 6,
Msg = 7,
Protocol = 8,
Cgrp = 9,
Security = 10,
Fetch = 11,
Interceptor = 12,
Plugin = 13,
Consumer = 14,
Admin = 15,
Eos = 16,
Mock = 17,
Assignor = 18,
Conf = 19,
Telemetry = 20,
All = 21,
}
impl Debug {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Debug::Unspecified => "DEBUG_UNSPECIFIED",
Debug::Generic => "DEBUG_GENERIC",
Debug::Broker => "DEBUG_BROKER",
Debug::Topic => "DEBUG_TOPIC",
Debug::Metadata => "DEBUG_METADATA",
Debug::Feature => "DEBUG_FEATURE",
Debug::Queue => "DEBUG_QUEUE",
Debug::Msg => "DEBUG_MSG",
Debug::Protocol => "DEBUG_PROTOCOL",
Debug::Cgrp => "DEBUG_CGRP",
Debug::Security => "DEBUG_SECURITY",
Debug::Fetch => "DEBUG_FETCH",
Debug::Interceptor => "DEBUG_INTERCEPTOR",
Debug::Plugin => "DEBUG_PLUGIN",
Debug::Consumer => "DEBUG_CONSUMER",
Debug::Admin => "DEBUG_ADMIN",
Debug::Eos => "DEBUG_EOS",
Debug::Mock => "DEBUG_MOCK",
Debug::Assignor => "DEBUG_ASSIGNOR",
Debug::Conf => "DEBUG_CONF",
Debug::Telemetry => "DEBUG_TELEMETRY",
Debug::All => "DEBUG_ALL",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"DEBUG_UNSPECIFIED" => Some(Self::Unspecified),
"DEBUG_GENERIC" => Some(Self::Generic),
"DEBUG_BROKER" => Some(Self::Broker),
"DEBUG_TOPIC" => Some(Self::Topic),
"DEBUG_METADATA" => Some(Self::Metadata),
"DEBUG_FEATURE" => Some(Self::Feature),
"DEBUG_QUEUE" => Some(Self::Queue),
"DEBUG_MSG" => Some(Self::Msg),
"DEBUG_PROTOCOL" => Some(Self::Protocol),
"DEBUG_CGRP" => Some(Self::Cgrp),
"DEBUG_SECURITY" => Some(Self::Security),
"DEBUG_FETCH" => Some(Self::Fetch),
"DEBUG_INTERCEPTOR" => Some(Self::Interceptor),
"DEBUG_PLUGIN" => Some(Self::Plugin),
"DEBUG_CONSUMER" => Some(Self::Consumer),
"DEBUG_ADMIN" => Some(Self::Admin),
"DEBUG_EOS" => Some(Self::Eos),
"DEBUG_MOCK" => Some(Self::Mock),
"DEBUG_ASSIGNOR" => Some(Self::Assignor),
"DEBUG_CONF" => Some(Self::Conf),
"DEBUG_TELEMETRY" => Some(Self::Telemetry),
"DEBUG_ALL" => Some(Self::All),
_ => None,
}
}
}
/// Action to take when there is no stored consumer offset for a partition.
/// NOTE(review): the variant set appears to mirror librdkafka's
/// `auto.offset.reset` values — confirm against the proto definition.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum AutoOffsetReset {
/// Behavior is not specified.
Unspecified = 0,
Smallest = 1,
Earliest = 2,
Beginning = 3,
Largest = 4,
Latest = 5,
End = 6,
Error = 7,
}
impl AutoOffsetReset {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
AutoOffsetReset::Unspecified => "AUTO_OFFSET_RESET_UNSPECIFIED",
AutoOffsetReset::Smallest => "AUTO_OFFSET_RESET_SMALLEST",
AutoOffsetReset::Earliest => "AUTO_OFFSET_RESET_EARLIEST",
AutoOffsetReset::Beginning => "AUTO_OFFSET_RESET_BEGINNING",
AutoOffsetReset::Largest => "AUTO_OFFSET_RESET_LARGEST",
AutoOffsetReset::Latest => "AUTO_OFFSET_RESET_LATEST",
AutoOffsetReset::End => "AUTO_OFFSET_RESET_END",
AutoOffsetReset::Error => "AUTO_OFFSET_RESET_ERROR",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"AUTO_OFFSET_RESET_UNSPECIFIED" => Some(Self::Unspecified),
"AUTO_OFFSET_RESET_SMALLEST" => Some(Self::Smallest),
"AUTO_OFFSET_RESET_EARLIEST" => Some(Self::Earliest),
"AUTO_OFFSET_RESET_BEGINNING" => Some(Self::Beginning),
"AUTO_OFFSET_RESET_LARGEST" => Some(Self::Largest),
"AUTO_OFFSET_RESET_LATEST" => Some(Self::Latest),
"AUTO_OFFSET_RESET_END" => Some(Self::End),
"AUTO_OFFSET_RESET_ERROR" => Some(Self::Error),
_ => None,
}
}
}
}
/// Kafka topic with topic-specific connection settings.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KafkaTopic {
/// Name of the Kafka topic.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Kafka connection settings for this topic.
#[prost(message, optional, tag = "2")]
pub settings: ::core::option::Option<Kafka>,
}
/// Settings for accessing RabbitMQ from the ClickHouse cluster.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Rabbitmq {
/// \[RabbitMQ\](<https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/>) username
#[prost(string, tag = "1")]
pub username: ::prost::alloc::string::String,
/// \[RabbitMQ\](<https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/>) password
#[prost(string, tag = "2")]
pub password: ::prost::alloc::string::String,
/// \[RabbitMQ\](<https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq/>) virtual host
#[prost(string, tag = "3")]
pub vhost: ::prost::alloc::string::String,
}
/// Compression settings applied to table parts that match the
/// \[min_part_size\] and \[min_part_size_ratio\] conditions.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Compression {
/// Compression method to use for the specified combination of \[min_part_size\] and \[min_part_size_ratio\].
#[prost(enumeration = "compression::Method", tag = "1")]
pub method: i32,
/// Minimum size of a part of a table.
#[prost(int64, tag = "2")]
pub min_part_size: i64,
/// Minimum ratio of a part relative to the size of all the data in the table.
#[prost(double, tag = "3")]
pub min_part_size_ratio: f64,
/// Compression level for the chosen \[method\].
/// NOTE(review): the valid range depends on the method — confirm against the proto definition.
#[prost(message, optional, tag = "4")]
pub level: ::core::option::Option<i64>,
}
/// Nested message and enum types in `Compression`.
pub mod compression {
/// Compression method for table parts.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum Method {
/// Method is not specified.
Unspecified = 0,
/// [LZ4 compression algorithm](<https://lz4.github.io/lz4/>).
Lz4 = 1,
/// [Zstandard compression algorithm](<https://facebook.github.io/zstd/>).
Zstd = 2,
}
impl Method {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Method::Unspecified => "METHOD_UNSPECIFIED",
Method::Lz4 => "LZ4",
Method::Zstd => "ZSTD",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"METHOD_UNSPECIFIED" => Some(Self::Unspecified),
"LZ4" => Some(Self::Lz4),
"ZSTD" => Some(Self::Zstd),
_ => None,
}
}
}
}
/// Configuration of an external dictionary to be used by the ClickHouse cluster.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExternalDictionary {
/// Name of the external dictionary.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Set of attributes for the external dictionary.
/// For in-depth description, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/>).
#[prost(message, optional, tag = "2")]
pub structure: ::core::option::Option<external_dictionary::Structure>,
/// Layout for storing the dictionary in memory.
/// For in-depth description, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/>).
#[prost(message, optional, tag = "3")]
pub layout: ::core::option::Option<external_dictionary::Layout>,
/// Setting for the period of time between dictionary updates.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/>).
#[prost(oneof = "external_dictionary::Lifetime", tags = "4, 5")]
pub lifetime: ::core::option::Option<external_dictionary::Lifetime>,
/// Description of the source for the external dictionary.
#[prost(oneof = "external_dictionary::Source", tags = "6, 7, 8, 9, 10")]
pub source: ::core::option::Option<external_dictionary::Source>,
}
/// Nested message and enum types in `ExternalDictionary`.
pub mod external_dictionary {
/// HTTP source for an external dictionary.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HttpSource {
/// URL of the source dictionary available over HTTP.
#[prost(string, tag = "1")]
pub url: ::prost::alloc::string::String,
/// The data format. Valid values are all formats supported by ClickHouse SQL dialect.
#[prost(string, tag = "2")]
pub format: ::prost::alloc::string::String,
/// HTTP headers.
#[prost(message, repeated, tag = "3")]
pub headers: ::prost::alloc::vec::Vec<http_source::Header>,
}
/// Nested message and enum types in `HttpSource`.
pub mod http_source {
/// A single HTTP header (name-value pair) to send with dictionary requests.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Header {
/// Name of the header.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Value of the header.
#[prost(string, tag = "2")]
pub value: ::prost::alloc::string::String,
}
}
/// MySQL source for an external dictionary.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MysqlSource {
/// Name of the MySQL database to connect to.
#[prost(string, tag = "1")]
pub db: ::prost::alloc::string::String,
/// Name of the database table to use as a ClickHouse dictionary.
#[prost(string, tag = "2")]
pub table: ::prost::alloc::string::String,
/// Default port to use when connecting to a replica of the dictionary source.
#[prost(int64, tag = "3")]
pub port: i64,
/// Name of the default user for replicas of the dictionary source.
#[prost(string, tag = "4")]
pub user: ::prost::alloc::string::String,
/// Password of the default user for replicas of the dictionary source.
#[prost(string, tag = "5")]
pub password: ::prost::alloc::string::String,
/// List of MySQL replicas of the database used as dictionary source.
#[prost(message, repeated, tag = "6")]
pub replicas: ::prost::alloc::vec::Vec<mysql_source::Replica>,
/// Selection criteria for the data in the specified MySQL table.
#[prost(string, tag = "7")]
pub r#where: ::prost::alloc::string::String,
/// Query for checking the dictionary status, to pull only updated data.
/// For more details, see [ClickHouse documentation on dictionaries](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/>).
#[prost(string, tag = "8")]
pub invalidate_query: ::prost::alloc::string::String,
/// Should the connection be closed after each request.
#[prost(message, optional, tag = "9")]
pub close_connection: ::core::option::Option<bool>,
/// Should a connection be shared for some requests.
#[prost(message, optional, tag = "10")]
pub share_connection: ::core::option::Option<bool>,
}
/// Nested message and enum types in `MysqlSource`.
pub mod mysql_source {
/// A single MySQL replica of the database used as the dictionary source.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Replica {
/// MySQL host of the replica.
#[prost(string, tag = "1")]
pub host: ::prost::alloc::string::String,
/// The priority of the replica that ClickHouse takes into account when connecting.
/// Replica with the highest priority should have this field set to the lowest number.
#[prost(int64, tag = "2")]
pub priority: i64,
/// Port to use when connecting to the replica.
/// If a port is not specified for a replica, ClickHouse uses the port specified for the source.
#[prost(int64, tag = "3")]
pub port: i64,
/// Name of the MySQL database user.
#[prost(string, tag = "4")]
pub user: ::prost::alloc::string::String,
/// Password of the MySQL database user.
#[prost(string, tag = "5")]
pub password: ::prost::alloc::string::String,
}
}
/// ClickHouse source for an external dictionary.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClickhouseSource {
/// Name of the ClickHouse database.
#[prost(string, tag = "1")]
pub db: ::prost::alloc::string::String,
/// Name of the table in the specified database to be used as the dictionary source.
#[prost(string, tag = "2")]
pub table: ::prost::alloc::string::String,
/// ClickHouse host of the specified database.
#[prost(string, tag = "3")]
pub host: ::prost::alloc::string::String,
/// Port to use when connecting to the host.
#[prost(int64, tag = "4")]
pub port: i64,
/// Name of the ClickHouse database user.
#[prost(string, tag = "5")]
pub user: ::prost::alloc::string::String,
/// Password of the ClickHouse database user.
#[prost(string, tag = "6")]
pub password: ::prost::alloc::string::String,
/// Selection criteria for the data in the specified ClickHouse table.
#[prost(string, tag = "7")]
pub r#where: ::prost::alloc::string::String,
/// Use ssl for connection.
#[prost(message, optional, tag = "8")]
pub secure: ::core::option::Option<bool>,
}
/// MongoDB source for an external dictionary.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MongodbSource {
/// Name of the MongoDB database.
#[prost(string, tag = "1")]
pub db: ::prost::alloc::string::String,
/// Name of the collection in the specified database to be used as the dictionary source.
#[prost(string, tag = "2")]
pub collection: ::prost::alloc::string::String,
/// MongoDB host of the specified database.
#[prost(string, tag = "3")]
pub host: ::prost::alloc::string::String,
/// Port to use when connecting to the host.
#[prost(int64, tag = "4")]
pub port: i64,
/// Name of the MongoDB database user.
#[prost(string, tag = "5")]
pub user: ::prost::alloc::string::String,
/// Password of the MongoDB database user.
#[prost(string, tag = "6")]
pub password: ::prost::alloc::string::String,
/// Additional MongoDB connection options.
/// NOTE(review): exact format (e.g. connection-string options) is not visible here — confirm against the proto definition.
#[prost(string, tag = "7")]
pub options: ::prost::alloc::string::String,
}
/// PostgreSQL source for an external dictionary.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PostgresqlSource {
/// Name of the PostgreSQL database.
#[prost(string, tag = "1")]
pub db: ::prost::alloc::string::String,
/// Name of the table in the specified database to be used as the dictionary source.
#[prost(string, tag = "2")]
pub table: ::prost::alloc::string::String,
/// Name of the PostgreSQL host.
#[prost(string, repeated, tag = "3")]
pub hosts: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Port to use when connecting to the host.
#[prost(int64, tag = "4")]
pub port: i64,
/// Name of the PostgreSQL database user.
#[prost(string, tag = "5")]
pub user: ::prost::alloc::string::String,
/// Password of the PostgreSQL database user.
#[prost(string, tag = "6")]
pub password: ::prost::alloc::string::String,
/// Query for checking the dictionary status, to pull only updated data.
/// For more details, see [ClickHouse documentation on dictionaries](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/>).
#[prost(string, tag = "7")]
pub invalidate_query: ::prost::alloc::string::String,
/// Mode of SSL TCP/IP connection to the PostgreSQL host.
/// For more details, see [PostgreSQL documentation](<https://www.postgresql.org/docs/current/libpq-ssl.html>).
#[prost(enumeration = "postgresql_source::SslMode", tag = "8")]
pub ssl_mode: i32,
}
/// Nested message and enum types in `PostgresqlSource`.
pub mod postgresql_source {
/// Mode of SSL TCP/IP connection to the PostgreSQL host.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum SslMode {
/// Mode is not specified.
Unspecified = 0,
/// Only try a non-SSL connection.
Disable = 1,
/// First try a non-SSL connection; if that fails, try an SSL connection.
Allow = 2,
/// First try an SSL connection; if that fails, try a non-SSL connection.
Prefer = 3,
/// Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
VerifyCa = 4,
/// Only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate.
VerifyFull = 5,
}
impl SslMode {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
SslMode::Unspecified => "SSL_MODE_UNSPECIFIED",
SslMode::Disable => "DISABLE",
SslMode::Allow => "ALLOW",
SslMode::Prefer => "PREFER",
SslMode::VerifyCa => "VERIFY_CA",
SslMode::VerifyFull => "VERIFY_FULL",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"SSL_MODE_UNSPECIFIED" => Some(Self::Unspecified),
"DISABLE" => Some(Self::Disable),
"ALLOW" => Some(Self::Allow),
"PREFER" => Some(Self::Prefer),
"VERIFY_CA" => Some(Self::VerifyCa),
"VERIFY_FULL" => Some(Self::VerifyFull),
_ => None,
}
}
}
}
/// Description of the structure (keys and attributes) of an external dictionary.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Structure {
/// Single numeric key column for the dictionary.
#[prost(message, optional, tag = "1")]
pub id: ::core::option::Option<structure::Id>,
/// Composite key for the dictionary, containing of one or more key columns.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/#composite-key>).
#[prost(message, optional, tag = "3")]
pub key: ::core::option::Option<structure::Key>,
/// Field holding the beginning of the range for dictionaries with `RANGE_HASHED` layout.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed>).
#[prost(message, optional, tag = "4")]
pub range_min: ::core::option::Option<structure::Attribute>,
/// Field holding the end of the range for dictionaries with `RANGE_HASHED` layout.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed>).
#[prost(message, optional, tag = "5")]
pub range_max: ::core::option::Option<structure::Attribute>,
/// Description of the fields available for database queries.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/#attributes>).
#[prost(message, repeated, tag = "2")]
pub attributes: ::prost::alloc::vec::Vec<structure::Attribute>,
}
/// Nested message and enum types in `Structure`.
pub mod structure {
/// Description of a field (column) available for database queries.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Attribute {
/// Name of the column.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Type of the column.
#[prost(string, tag = "2")]
pub r#type: ::prost::alloc::string::String,
/// Default value for an element without data (for example, an empty string).
#[prost(string, tag = "3")]
pub null_value: ::prost::alloc::string::String,
/// Expression, describing the attribute, if applicable.
#[prost(string, tag = "4")]
pub expression: ::prost::alloc::string::String,
/// Indication of hierarchy support.
/// Default value: `false`.
#[prost(bool, tag = "5")]
pub hierarchical: bool,
/// Indication of injective mapping "id -> attribute".
/// Default value: `false`.
#[prost(bool, tag = "6")]
pub injective: bool,
}
/// Numeric key.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Id {
/// Name of the numeric key.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
/// Complex key.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Key {
/// Attributes of a complex key.
#[prost(message, repeated, tag = "1")]
pub attributes: ::prost::alloc::vec::Vec<Attribute>,
}
}
/// Layout determining how to store the dictionary in memory.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/>).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Layout {
/// Layout type for an external dictionary.
#[prost(enumeration = "layout::Type", tag = "1")]
pub r#type: i32,
/// Number of cells in the cache. Rounded up to a power of two.
/// Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
#[prost(int64, tag = "2")]
pub size_in_cells: i64,
/// Allows to read expired keys.
/// Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
#[prost(message, optional, tag = "5")]
pub allow_read_expired_keys: ::core::option::Option<bool>,
/// Max size of update queue.
/// Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
#[prost(int64, tag = "6")]
pub max_update_queue_size: i64,
/// Max timeout in milliseconds for push update task into queue.
/// Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
#[prost(int64, tag = "7")]
pub update_queue_push_timeout_milliseconds: i64,
/// Max wait timeout in milliseconds for update task to complete.
/// Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
#[prost(int64, tag = "8")]
pub query_wait_timeout_milliseconds: i64,
/// Max threads for cache dictionary update.
/// Applicable only for CACHE and COMPLEX_KEY_CACHE layout types.
#[prost(int64, tag = "9")]
pub max_threads_for_updates: i64,
/// Initial dictionary key size.
/// Applicable only for FLAT layout type.
#[prost(int64, tag = "10")]
pub initial_array_size: i64,
/// Maximum dictionary key size.
/// Applicable only for FLAT layout type.
#[prost(int64, tag = "3")]
pub max_array_size: i64,
/// Allows to retrieve key attribute using dictGetString function.
/// Enabling this option increases memory usage.
/// Applicable only for IP_TRIE layout type.
#[prost(message, optional, tag = "4")]
pub access_to_key_from_attributes: ::core::option::Option<bool>,
}
/// Nested message and enum types in `Layout`.
pub mod layout {
/// Layout type for an external dictionary.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum Type {
/// Type is not specified.
Unspecified = 0,
/// The entire dictionary is stored in memory in the form of flat arrays.
/// Available for all dictionary sources.
Flat = 1,
/// The entire dictionary is stored in memory in the form of a hash table.
/// Available for all dictionary sources.
Hashed = 2,
/// Similar to HASHED, to be used with composite keys.
/// Available for all dictionary sources.
ComplexKeyHashed = 3,
/// The entire dictionary is stored in memory in the form of a hash table,
/// with an ordered array of ranges and their corresponding values.
/// Available for all dictionary sources.
RangeHashed = 4,
/// The dictionary is stored in a cache with a set number of cells.
/// Available for MySQL, ClickHouse and HTTP dictionary sources.
Cache = 5,
/// Similar to CACHE, to be used with composite keys.
/// Available for MySQL, ClickHouse and HTTP dictionary sources.
ComplexKeyCache = 6,
/// Similar to HASHED, but uses less memory in favor of more CPU usage.
SparseHashed = 7,
/// Similar to SPARSE_HASHED, to be used with composite keys.
ComplexKeySparseHashed = 8,
/// Similar to RANGE_HASHED, to be used with composite keys.
ComplexKeyRangeHashed = 9,
/// The dictionary is not stored in memory and directly goes to the source during the processing of a request.
Direct = 10,
/// Similar to DIRECT, to be used with composite keys.
ComplexKeyDirect = 11,
/// The specialized layout type for mapping network prefixes (IP addresses) to metadata such as ASN.
IpTrie = 12,
}
impl Type {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Type::Unspecified => "TYPE_UNSPECIFIED",
Type::Flat => "FLAT",
Type::Hashed => "HASHED",
Type::ComplexKeyHashed => "COMPLEX_KEY_HASHED",
Type::RangeHashed => "RANGE_HASHED",
Type::Cache => "CACHE",
Type::ComplexKeyCache => "COMPLEX_KEY_CACHE",
Type::SparseHashed => "SPARSE_HASHED",
Type::ComplexKeySparseHashed => "COMPLEX_KEY_SPARSE_HASHED",
Type::ComplexKeyRangeHashed => "COMPLEX_KEY_RANGE_HASHED",
Type::Direct => "DIRECT",
Type::ComplexKeyDirect => "COMPLEX_KEY_DIRECT",
Type::IpTrie => "IP_TRIE",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"TYPE_UNSPECIFIED" => Some(Self::Unspecified),
"FLAT" => Some(Self::Flat),
"HASHED" => Some(Self::Hashed),
"COMPLEX_KEY_HASHED" => Some(Self::ComplexKeyHashed),
"RANGE_HASHED" => Some(Self::RangeHashed),
"CACHE" => Some(Self::Cache),
"COMPLEX_KEY_CACHE" => Some(Self::ComplexKeyCache),
"SPARSE_HASHED" => Some(Self::SparseHashed),
"COMPLEX_KEY_SPARSE_HASHED" => Some(Self::ComplexKeySparseHashed),
"COMPLEX_KEY_RANGE_HASHED" => Some(Self::ComplexKeyRangeHashed),
"DIRECT" => Some(Self::Direct),
"COMPLEX_KEY_DIRECT" => Some(Self::ComplexKeyDirect),
"IP_TRIE" => Some(Self::IpTrie),
_ => None,
}
}
}
}
/// Range of intervals between dictionary updates for ClickHouse to choose from.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Range {
/// Minimum dictionary lifetime.
#[prost(int64, tag = "1")]
pub min: i64,
/// Maximum dictionary lifetime.
#[prost(int64, tag = "2")]
pub max: i64,
}
/// Setting for the period of time between dictionary updates.
/// For details, see [ClickHouse documentation](<https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/>).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Lifetime {
/// Fixed interval between dictionary updates.
/// NOTE(review): units are presumably seconds (per ClickHouse lifetime docs) — confirm against the proto definition.
#[prost(int64, tag = "4")]
FixedLifetime(i64),
/// Range of intervals between dictionary updates for ClickHouse to choose from.
#[prost(message, tag = "5")]
LifetimeRange(Range),
}
/// Description of the source for the external dictionary.
/// As a protobuf `oneof`, at most one source type may be set.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Source {
/// HTTP source for the dictionary.
#[prost(message, tag = "6")]
HttpSource(HttpSource),
/// MySQL source for the dictionary.
#[prost(message, tag = "7")]
MysqlSource(MysqlSource),
/// ClickHouse source for the dictionary.
#[prost(message, tag = "8")]
ClickhouseSource(ClickhouseSource),
/// MongoDB source for the dictionary.
#[prost(message, tag = "9")]
MongodbSource(MongodbSource),
/// PostgreSQL source for the dictionary.
#[prost(message, tag = "10")]
PostgresqlSource(PostgresqlSource),
}
}
/// Rollup settings for the GraphiteMergeTree table engine.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GraphiteRollup {
/// Name for the specified combination of settings for Graphite rollup.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Pattern to use for the rollup.
#[prost(message, repeated, tag = "2")]
pub patterns: ::prost::alloc::vec::Vec<graphite_rollup::Pattern>,
/// The name of the column storing the metric name (Graphite sensor).
/// Default: Path
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/graphitemergetree#required-columns>)
#[prost(string, tag = "3")]
pub path_column_name: ::prost::alloc::string::String,
/// The name of the column storing the time of measuring the metric.
/// Default: Time
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/graphitemergetree#required-columns>)
#[prost(string, tag = "4")]
pub time_column_name: ::prost::alloc::string::String,
/// The name of the column storing the value of the metric at the time set in time_column_name.
/// Default: Value
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/graphitemergetree#required-columns>)
#[prost(string, tag = "5")]
pub value_column_name: ::prost::alloc::string::String,
/// The name of the column storing the version of the metric.
/// Default: Timestamp
/// See in-depth description in [ClickHouse documentation](<https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/graphitemergetree#required-columns>)
#[prost(string, tag = "6")]
pub version_column_name: ::prost::alloc::string::String,
}
/// Nested message and enum types in `GraphiteRollup`.
pub mod graphite_rollup {
/// A single rollup pattern: which metrics it matches, how they are
/// aggregated, and the retention schedule to apply.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Pattern {
/// Pattern for metric names.
#[prost(string, tag = "1")]
pub regexp: ::prost::alloc::string::String,
/// Name of the aggregating function to apply to data of the age specified in \[retention\].
#[prost(string, tag = "2")]
pub function: ::prost::alloc::string::String,
/// Age of data to use for thinning.
#[prost(message, repeated, tag = "3")]
pub retention: ::prost::alloc::vec::Vec<pattern::Retention>,
}
/// Nested message and enum types in `Pattern`.
pub mod pattern {
/// Retention rule: the precision to keep for data of at least the given age.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Retention {
/// Minimum age of the data in seconds.
#[prost(int64, tag = "1")]
pub age: i64,
/// Precision of determining the age of the data, in seconds.
#[prost(int64, tag = "2")]
pub precision: i64,
}
}
}
/// Rule that replaces sensitive data in queries with a substitution string.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryMaskingRule {
/// Name for the rule.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// RE2 compatible regular expression.
/// Required.
#[prost(string, tag = "2")]
pub regexp: ::prost::alloc::string::String,
/// Substitution string for sensitive data.
/// Default: six asterisks
#[prost(string, tag = "3")]
pub replace: ::prost::alloc::string::String,
}
/// Configuration of the ClickHouse query cache.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct QueryCache {
/// The maximum cache size in bytes.
/// Default: 1073741824 (1 GiB)
#[prost(message, optional, tag = "1")]
pub max_size_in_bytes: ::core::option::Option<i64>,
/// The maximum number of SELECT query results stored in the cache.
/// Default: 1024
#[prost(message, optional, tag = "2")]
pub max_entries: ::core::option::Option<i64>,
/// The maximum size in bytes SELECT query results may have to be saved in the cache.
/// Default: 1048576 (1 MiB)
#[prost(message, optional, tag = "3")]
pub max_entry_size_in_bytes: ::core::option::Option<i64>,
/// The maximum number of rows SELECT query results may have to be saved in the cache.
/// Default: 30000000 (30 mil)
#[prost(message, optional, tag = "4")]
pub max_entry_size_in_rows: ::core::option::Option<i64>,
}
/// JDBC bridge for queries to external databases.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JdbcBridge {
/// Host of the JDBC bridge.
#[prost(string, tag = "1")]
pub host: ::prost::alloc::string::String,
/// Port of the JDBC bridge.
#[prost(message, optional, tag = "2")]
pub port: ::core::option::Option<i64>,
}
/// Logging level for the ClickHouse cluster.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum LogLevel {
/// Level is not specified.
Unspecified = 0,
Trace = 1,
Debug = 2,
Information = 3,
Warning = 4,
Error = 5,
}
impl LogLevel {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
LogLevel::Unspecified => "LOG_LEVEL_UNSPECIFIED",
LogLevel::Trace => "TRACE",
LogLevel::Debug => "DEBUG",
LogLevel::Information => "INFORMATION",
LogLevel::Warning => "WARNING",
LogLevel::Error => "ERROR",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"LOG_LEVEL_UNSPECIFIED" => Some(Self::Unspecified),
"TRACE" => Some(Self::Trace),
"DEBUG" => Some(Self::Debug),
"INFORMATION" => Some(Self::Information),
"WARNING" => Some(Self::Warning),
"ERROR" => Some(Self::Error),
_ => None,
}
}
}
}
/// Set of ClickHouse cluster configurations: effective, user-defined, and default.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClickhouseConfigSet {
/// Effective settings for a ClickHouse cluster (a combination of settings defined
/// in \[user_config\] and \[default_config\]).
#[prost(message, optional, tag = "1")]
pub effective_config: ::core::option::Option<ClickhouseConfig>,
/// User-defined settings for a ClickHouse cluster.
#[prost(message, optional, tag = "2")]
pub user_config: ::core::option::Option<ClickhouseConfig>,
/// Default configuration for a ClickHouse cluster.
#[prost(message, optional, tag = "3")]
pub default_config: ::core::option::Option<ClickhouseConfig>,
}