#![allow(clippy::needless_doctest_main)]
#[cfg_attr(test, macro_use)]
extern crate arcon_macros;
extern crate self as arcon;
pub use arcon_macros::*;
#[doc(hidden)]
pub use arcon_state::*;
#[doc(hidden)]
pub use crate::index::{ArconState, IndexOps};
#[doc(hidden)]
pub use arcon_state::error::ArconStateError;
#[doc(hidden)]
pub use crate::data::{ArconType, VersionId};
#[doc(hidden)]
pub use crate::{
data::arrow::ToArrow,
error::ArconResult,
table::{ImmutableTable, MutableTable, RecordBatchBuilder, RECORD_BATCH_SIZE},
};
#[doc(hidden)]
pub use arrow::{
array::{
ArrayBuilder, ArrayData, ArrayDataBuilder, PrimitiveBuilder, StringBuilder, StructArray,
StructBuilder, UInt64Array, UInt64Builder,
},
datatypes::{DataType, Field, Schema},
error::ArrowError,
};
#[doc(hidden)]
pub use kompact::prelude::SerId;
#[doc(hidden)]
/// Internal buffer primitives re-exported for the crate's benchmark suite.
///
/// Hidden from rustdoc: this is not a stable public API, it only exists so
/// benches outside this module can reach the otherwise-private `buffer` module.
pub mod bench_utils {
    pub use crate::buffer::event::{BufferPool, BufferReader};
}
pub mod application;
mod buffer;
mod data;
pub mod dataflow;
pub mod error;
pub mod index;
mod manager;
#[cfg(feature = "metrics")]
#[allow(dead_code)]
mod metrics;
mod stream;
mod table;
#[cfg(test)]
mod test;
mod util;
#[cfg(test)]
/// Shared helpers for this crate's unit tests.
pub mod test_utils {
    use arcon_allocator::Allocator;
    use arcon_state::backend::Backend;
    use once_cell::sync::Lazy;
    use std::sync::{Arc, Mutex};

    /// Global allocator shared across tests, lazily initialized with a
    /// 1 GiB (1024 * 1024 * 1024 bytes) capacity.
    pub static ALLOCATOR: Lazy<Arc<Mutex<Allocator>>> =
        Lazy::new(|| Arc::new(Mutex::new(Allocator::new(1073741824))));

    /// Creates a state backend of type `B` rooted in a fresh temporary
    /// directory named "testDB".
    ///
    /// # Panics
    /// Panics if the temporary directory or the backend cannot be created
    /// (acceptable in test code).
    pub fn temp_backend<B: Backend>() -> B {
        let test_dir = tempfile::tempdir().unwrap();
        // BUGFIX: `TempDir::path()` borrows from a guard whose Drop deletes
        // the directory. Previously the guard was dropped when this function
        // returned, so the backend was handed back pointing at a directory
        // that had just been removed. `into_path()` releases ownership so the
        // directory persists for the backend's lifetime (it is intentionally
        // leaked; fine for test runs in the system temp dir).
        let path = test_dir.into_path();
        B::create(&path, "testDB".to_string()).unwrap()
    }
}
/// Convenience prelude: `use arcon::prelude::*;` pulls in everything needed
/// to build and run a typical arcon application (builders, operators,
/// sources/sinks, state backends, and key runtime types).
pub mod prelude {
    // Core application, data, dataflow, and operator types from this crate.
    pub use crate::{
        application::conf::{logger::LoggerType, ApplicationConf},
        application::{Application, ApplicationBuilder},
        data::{ArconElement, ArconNever, ArconType, StateID, VersionId},
        dataflow::{
            builder::{Assigner, OperatorBuilder, SourceBuilder},
            conf::{OperatorConf, ParallelismStrategy, SourceConf, StreamKind, WindowConf},
            dfg::ChannelKind,
            sink::{Sink, ToBuilderExt, ToSinkExt},
            source::{LocalFileSource, ToStreamExt},
            stream::{
                FilterExt, KeyBuilder, KeyedStream, MapExt, OperatorExt, PartitionExt, Stream,
            },
        },
        manager::snapshot::Snapshot,
        stream::{
            operator::{
                function::{Filter, FlatMap, Map, MapInPlace},
                sink::local_file::LocalFileSink,
                window::{WindowAssigner, WindowState},
                Operator, OperatorContext,
            },
            source::{schema::ProtoSchema, Source},
            time::{ArconTime, Time},
        },
        Arcon, ArconState,
    };
    // Feature-gated integrations (Kafka source/consumer, JSON schema support).
    #[cfg(feature = "kafka")]
    pub use crate::dataflow::source::kafka::KafkaSource;
    #[cfg(feature = "kafka")]
    pub use crate::stream::source::kafka::KafkaConsumerConf;
    #[cfg(all(feature = "serde_json", feature = "serde"))]
    pub use crate::stream::source::schema::JsonSchema;
    #[cfg(feature = "kafka")]
    pub use rdkafka::config::ClientConfig;
    // Error/result types commonly matched on by user code.
    pub use crate::error::{timer::TimerExpiredError, ArconResult, StateResult};
    // Kompact actor-system primitives; `Channel` is renamed to avoid clashing
    // with arcon's own channel concepts.
    #[doc(hidden)]
    pub use kompact::{
        default_components::*,
        prelude::{Channel as KompactChannel, *},
    };
    // Arrow interop: derive macro re-export plus table/schema/batch types.
    pub use super::{Arrow, MutableTable, ToArrow};
    pub use arrow::{datatypes::Schema, record_batch::RecordBatch};
    // State backends: the whole arcon_state crate under a `state` alias,
    // plus the commonly-used backend and state-handle types directly.
    pub use arcon_state as state;
    #[cfg(feature = "rocksdb")]
    pub use arcon_state::Rocks;
    pub use arcon_state::{
        Aggregator, AggregatorState, Backend, BackendType, Handle, MapState, ReducerState, Sled,
        ValueState, VecState,
    };
    // Active-state index abstractions built on top of the backends.
    pub use crate::index::{
        AppenderIndex, AppenderWindow, ArrowWindow, EagerAppender, EagerHashTable, EagerValue,
        EmptyState, HashTable, IncrementalWindow, IndexOps, LazyValue, LocalValue, ValueIndex,
    };
    // Protobuf (prost) traits for ArconType serialization, and Arc for
    // shared ownership in builder closures.
    pub use prost::*;
    pub use std::sync::Arc;
    // Linux-only perf-counter metrics, behind the hardware_counters feature.
    #[cfg(all(feature = "hardware_counters", target_os = "linux"))]
    pub use crate::metrics::perf_event::{HardwareCounter, PerfEvents};
}