// axiom-rs — datasets/model.rs

1use bitflags::bitflags;
2use bitflags_serde_shim::impl_serde_for_bitflags;
3use chrono::{DateTime, Utc};
4use http::header::HeaderValue;
5use serde::{
6    de::{self, Visitor},
7    Deserialize, Deserializer, Serialize, Serializer,
8};
9use serde_json::value::Value as JsonValue;
10use std::{
11    collections::HashMap,
12    fmt::{self, Display},
13    ops::Add,
14    str::FromStr,
15};
16
17pub use table::*;
18
19use crate::serde::deserialize_null_default;
20
/// The default field the server looks for a time to use as
/// ingestion time. If not present, the server will set the ingestion time by
/// itself.
// `const` rather than `static`: for a simple immutable `&str` there is no
// reason to pin a single memory location, and `const` is the conventional
// choice for such values.
pub const TIMESTAMP_FIELD: &str = "_time";
25
26/// All supported content types.
27#[derive(Debug, Clone, Copy, PartialEq, Eq)]
28#[non_exhaustive]
29pub enum ContentType {
30    /// JSON treats the data as JSON array.
31    Json,
32    /// NDJSON treats the data as newline delimited JSON objects. Preferred
33    /// format.
34    NdJson,
35    /// CSV treats the data as CSV content.
36    Csv,
37}
38
39impl ContentType {
40    /// Returns the content type as a string.
41    #[must_use]
42    pub fn as_str(&self) -> &'static str {
43        match self {
44            ContentType::Json => "application/json",
45            ContentType::NdJson => "application/x-ndjson",
46            ContentType::Csv => "text/csv",
47        }
48    }
49}
50
51impl Display for ContentType {
52    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
53        write!(f, "{}", self.as_str())
54    }
55}
56
57impl FromStr for ContentType {
58    type Err = crate::error::Error;
59
60    fn from_str(s: &str) -> Result<Self, Self::Err> {
61        match s {
62            "application/json" => Ok(ContentType::Json),
63            "application/x-ndjson" => Ok(ContentType::NdJson),
64            "text/csv" => Ok(ContentType::Csv),
65            _ => Err(crate::error::Error::InvalidContentType(s.to_string())),
66        }
67    }
68}
69
70impl From<ContentType> for HeaderValue {
71    fn from(content_type: ContentType) -> Self {
72        HeaderValue::from_static(content_type.as_str())
73    }
74}
75
76/// All supported content encoding
77#[derive(Debug, Clone, Copy, PartialEq, Eq)]
78#[non_exhaustive]
79pub enum ContentEncoding {
80    /// Identity marks the data as not being encoded.
81    Identity,
82    /// GZIP marks the data as being gzip encoded.
83    Gzip,
84    /// Zstd marks the data as being zstd encoded.
85    Zstd,
86}
87
88impl ContentEncoding {
89    /// Returns the content encoding as a string.
90    #[must_use]
91    pub fn as_str(&self) -> &'static str {
92        match self {
93            ContentEncoding::Identity => "",
94            ContentEncoding::Gzip => "gzip",
95            ContentEncoding::Zstd => "zstd",
96        }
97    }
98}
99
100impl Display for ContentEncoding {
101    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
102        write!(f, "{}", self.as_str())
103    }
104}
105
106impl FromStr for ContentEncoding {
107    type Err = crate::error::Error;
108
109    fn from_str(s: &str) -> Result<Self, Self::Err> {
110        match s {
111            "" => Ok(ContentEncoding::Identity),
112            "gzip" => Ok(ContentEncoding::Gzip),
113            "zstd" => Ok(ContentEncoding::Zstd),
114            _ => Err(crate::error::Error::InvalidContentEncoding(s.to_string())),
115        }
116    }
117}
118
119impl From<ContentEncoding> for HeaderValue {
120    fn from(content_encoding: ContentEncoding) -> Self {
121        HeaderValue::from_static(content_encoding.as_str())
122    }
123}
124
/// An Axiom dataset.
///
/// Some server-side fields (`integrationConfigs`, `integrationFilters`,
/// `quickQueries`) are intentionally not mapped here.
#[derive(Serialize, Deserialize, Debug)]
pub struct Dataset {
    /// The name of the dataset.
    pub name: String,
    /// The description of the dataset.
    pub description: String,
    /// The ID of the user who created the dataset (wire name: `who`).
    #[serde(rename = "who")]
    pub created_by: String,
    /// The time the dataset was created at (wire name: `created`).
    #[serde(rename = "created")]
    pub created_at: DateTime<Utc>,
    // ignored: integrationConfigs, integrationFilters, quickQueries
}
140
/// Details of the information stored in a dataset.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Stat {
    /// The unique name of the dataset.
    pub name: String,
    /// The number of events of the dataset.
    pub num_events: u64,
    /// The number of fields of the dataset.
    pub num_fields: u32,
    /// The amount of data stored in the dataset, in bytes.
    pub input_bytes: u64,
    /// The amount of compressed data stored in the dataset, in bytes.
    pub compressed_bytes: u64,
    /// The time of the oldest event stored in the dataset, if any.
    pub min_time: Option<DateTime<Utc>>,
    /// The time of the newest event stored in the dataset, if any.
    pub max_time: Option<DateTime<Utc>>,
    /// The time the dataset was created at (wire name: `created`).
    #[serde(rename = "created")]
    pub created_at: DateTime<Utc>,
}
163
/// Details of the information stored inside a dataset including the fields.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Info {
    /// The stats of the dataset; flattened into this struct on the wire.
    #[serde(flatten)]
    pub stat: Stat,
    /// The fields of the dataset.
    pub fields: Vec<Field>,
}
174
/// Returned on event ingestion operation.
///
/// Two statuses can be combined with `+` (see the `Add` impl below), which
/// sums the counters and concatenates the failure lists.
#[derive(Serialize, Deserialize, Debug, Default)]
#[serde(rename_all = "camelCase")]
pub struct IngestStatus {
    /// Amount of events that have been ingested.
    pub ingested: u64,
    /// Amount of events that failed to ingest.
    pub failed: u64,
    /// Ingestion failures, if any.
    pub failures: Vec<IngestFailure>,
    /// Number of bytes processed.
    pub processed_bytes: u64,
    /// Amount of blocks created.
    #[deprecated(
        since = "0.8.0",
        note = "This field will be removed in a future version."
    )]
    pub blocks_created: u32,
    /// The length of the Write-Ahead Log.
    #[deprecated(
        since = "0.8.0",
        note = "This field will be removed in a future version."
    )]
    pub wal_length: u32,
}
200
201impl Add for IngestStatus {
202    type Output = Self;
203
204    fn add(self, other: Self) -> Self {
205        let mut failures = self.failures;
206        failures.extend(other.failures);
207
208        #[allow(deprecated)]
209        Self {
210            ingested: self.ingested + other.ingested,
211            failed: self.failed + other.failed,
212            failures,
213            processed_bytes: self.processed_bytes + other.processed_bytes,
214            blocks_created: self.blocks_created + other.blocks_created,
215            wal_length: other.wal_length,
216        }
217    }
218}
219
/// Ingestion failure of a single event.
#[derive(Serialize, Deserialize, Debug)]
pub struct IngestFailure {
    /// Timestamp of the event that failed to ingest.
    pub timestamp: DateTime<Utc>,
    /// Error message that made the event fail to ingest.
    pub error: String,
}
228
/// Used to create a dataset.
#[derive(Serialize, Debug)]
pub(crate) struct DatasetCreateRequest {
    /// Restricted to 128 bytes of [a-zA-Z0-9] and special characters "-", "_"
    /// and ".". Special characters cannot be a prefix or suffix. The prefix
    /// cannot be "axiom-".
    // NOTE(review): these constraints are enforced server-side; this struct
    // performs no local validation.
    pub name: String,
    /// Description of the dataset.
    pub description: String,
}
239
/// Used to update a dataset.
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct DatasetUpdateRequest {
    /// Description of the dataset to update.
    pub description: String,
}
246
247/// A query that gets executed on a dataset.
248/// If you're looking for the analytics, check out [`Query`].
249#[derive(Serialize, Deserialize, Debug, Default, Eq, PartialEq)]
250#[serde(rename_all = "camelCase")]
251pub struct Query {
252    /// The APL of the query to execute
253    pub apl: String,
254    /// Start time of the query.
255    pub start_time: Option<DateTime<Utc>>,
256    /// End time of the query.
257    pub end_time: Option<DateTime<Utc>>,
258    /// cursor for the query
259    pub cursor: Option<String>,
260    /// Specifies whether the event that matches the cursor should be included or not
261    pub include_cursor: bool,
262    /// Requests the cursor to be included in the response
263    pub include_cursor_field: bool,
264}
265
266impl Query {
267    /// Creates a new query with the given APL and options.
268    pub fn new<S: ToString + ?Sized>(apl: &S, opts: QueryOptions) -> Self {
269        Self {
270            apl: apl.to_string(),
271            start_time: opts.start_time,
272            end_time: opts.end_time,
273            cursor: opts.cursor,
274            include_cursor: opts.include_cursor,
275            include_cursor_field: opts.include_cursor_field,
276        }
277    }
278}
279
// Serde invokes `skip_serializing_if` predicates with a reference, so this
// must accept `&bool` even though `bool` is `Copy`.
#[allow(clippy::trivially_copy_pass_by_ref)]
fn is_false(b: &bool) -> bool {
    !(*b)
}
285
// QueryParams is the part of `QueryOptions` that is added to the request url
// (as opposed to the request body, which carries `Query`).
#[derive(Serialize, Debug, Default)]
pub(crate) struct QueryParams {
    // Omitted from the url entirely when `false`.
    #[serde(rename = "nocache", skip_serializing_if = "is_false")]
    pub no_cache: bool,
    // Serialized as `saveAsKind`; omitted when `None`.
    #[serde(rename = "saveAsKind", skip_serializing_if = "Option::is_none")]
    pub save: Option<QueryKind>,
    // Always serialized.
    pub format: AplResultFormat,
}
295
296impl From<&QueryOptions> for QueryParams {
297    fn from(options: &QueryOptions) -> Self {
298        let save = if options.save {
299            Some(QueryKind::Apl)
300        } else {
301            None
302        };
303        Self {
304            no_cache: options.no_cache,
305            save,
306            format: options.format,
307        }
308    }
309}
310
// This is a configuration that just happens to have many flags.
#[allow(clippy::struct_excessive_bools)]
/// The optional parameters to APL query methods.
#[derive(Debug, Default, Clone)]
pub struct QueryOptions {
    /// The start time of the query.
    pub start_time: Option<DateTime<Utc>>,
    /// The end time of the query.
    pub end_time: Option<DateTime<Utc>>,
    /// The cursor for use in pagination.
    pub cursor: Option<String>,
    /// Specifies whether the event that matches the cursor should be
    /// included in the result.
    pub include_cursor: bool,
    /// Omits the query cache.
    pub no_cache: bool,
    /// Save the query on the server, if set to `true`. The ID of the saved query
    /// is returned with the query result as part of the response.
    pub save: bool,
    /// Format specifies the format of the APL query. Defaults to `Tabular`.
    pub format: AplResultFormat,
    /// Requests the cursor to be included in the response.
    pub include_cursor_field: bool,
}
335
/// The result format of an APL query.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Default)]
#[non_exhaustive]
#[serde(rename_all = "camelCase")]
pub enum AplResultFormat {
    /// Tabular result format (serialized as `"tabular"`). Currently the only
    /// supported format, and the default.
    #[default]
    Tabular,
}
345
/// The kind of a query.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
#[non_exhaustive]
#[serde(rename_all = "camelCase")]
pub enum QueryKind {
    /// Analytics query.
    #[default]
    Analytics,
    /// Streaming query.
    Stream,
    /// APL query. Read-only: don't use this kind in requests.
    Apl,
}
359
/// A field that is projected to the query result.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct Projection {
    /// The name of the field to project.
    pub field: String,
    /// The alias to reference the projected field by, if any.
    pub alias: Option<String>,
}
368
/// Supported aggregation operations.
///
/// Serialized to and from the lower-case wire names (see the `Serialize` /
/// `Deserialize` impls below); names the client does not recognize are
/// preserved in [`AggregationOp::Unknown`].
#[derive(Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum AggregationOp {
    /// [count](https://axiom.co/docs/apl/aggregation-function/statistical-functions#count())
    Count,
    /// [dcount](https://axiom.co/docs/apl/aggregation-function/statistical-functions#dcount())
    CountDistinct,
    /// [make_set](https://axiom.co/docs/apl/aggregation-function/statistical-functions#make-set())
    MakeSet,
    /// [make_set_if](https://axiom.co/docs/apl/aggregation-function/statistical-functions#make-set-if())
    MakeSetIf,

    /// [sum](https://axiom.co/docs/apl/aggregation-function/statistical-functions#sum())
    Sum,
    /// [avg](https://axiom.co/docs/apl/aggregation-function/statistical-functions#avg())
    Avg,
    /// [min](https://axiom.co/docs/apl/aggregation-function/statistical-functions#min())
    Min,
    /// [max](https://axiom.co/docs/apl/aggregation-function/statistical-functions#max())
    Max,
    /// [topk](https://axiom.co/docs/apl/aggregation-function/statistical-functions#topk())
    Topk,
    /// [percentile](https://axiom.co/docs/apl/aggregation-function/statistical-functions#percentile(),-percentiles-array())
    Percentiles,
    /// [histogram](https://axiom.co/docs/apl/aggregation-function/statistical-functions#histogram())
    Histogram,
    /// [stdev](https://axiom.co/docs/apl/aggregation-function/statistical-functions#stdev())
    StandardDeviation,
    /// [variance](https://axiom.co/docs/apl/aggregation-function/statistical-functions#variance())
    Variance,
    /// [argmin](https://axiom.co/docs/apl/aggregation-function/statistical-functions#argmin())
    ArgMin,
    /// [argmax](https://axiom.co/docs/apl/aggregation-function/statistical-functions#argmax())
    ArgMax,

    /// Read-only. Not to be used for query requests. Only in place to support the APL query result.
    /// [countif](https://axiom.co/docs/apl/aggregation-function/statistical-functions#countif())
    CountIf,
    /// Read-only. Not to be used for query requests. Only in place to support the APL query result.
    /// [dcountif](https://axiom.co/docs/apl/aggregation-function/statistical-functions#dcountif())
    DistinctIf,

    /// Unknown aggregation operation; carries the raw wire name.
    Unknown(String),
}
415
416impl Serialize for AggregationOp {
417    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
418    where
419        S: Serializer,
420    {
421        serializer.serialize_str(match self {
422            Self::Count => "count",
423            Self::CountDistinct => "distinct",
424            Self::MakeSet => "makeset",
425            Self::MakeSetIf => "makesetif",
426            Self::Sum => "sum",
427            Self::Avg => "avg",
428            Self::Min => "min",
429            Self::Max => "max",
430            Self::Topk => "topk",
431            Self::Percentiles => "percentiles",
432            Self::Histogram => "histogram",
433            Self::StandardDeviation => "stdev",
434            Self::Variance => "variance",
435            Self::ArgMin => "argmin",
436            Self::ArgMax => "argmax",
437            Self::CountIf => "countif",
438            Self::DistinctIf => "distinctif",
439            Self::Unknown(ref s) => s,
440        })
441    }
442}
443
444struct AggregationOpVisitor;
445
446impl Visitor<'_> for AggregationOpVisitor {
447    type Value = AggregationOp;
448
449    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
450        write!(formatter, "a valid aggregation op string")
451    }
452
453    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
454    where
455        E: de::Error,
456    {
457        match s {
458            "count" => Ok(Self::Value::Count),
459            "distinct" => Ok(Self::Value::CountDistinct),
460            "makeset" => Ok(Self::Value::MakeSet),
461            "makesetif" => Ok(Self::Value::MakeSetIf),
462            "sum" => Ok(Self::Value::Sum),
463            "avg" => Ok(Self::Value::Avg),
464            "min" => Ok(Self::Value::Min),
465            "max" => Ok(Self::Value::Max),
466            "topk" => Ok(Self::Value::Topk),
467            "percentiles" => Ok(Self::Value::Percentiles),
468            "histogram" => Ok(Self::Value::Histogram),
469            "stdev" => Ok(Self::Value::StandardDeviation),
470            "variance" => Ok(Self::Value::Variance),
471            "argmin" => Ok(Self::Value::ArgMin),
472            "argmax" => Ok(Self::Value::ArgMax),
473            "countif" => Ok(Self::Value::CountIf),
474            "distinctif" => Ok(Self::Value::DistinctIf),
475            aggregation => Ok(Self::Value::Unknown(aggregation.to_string())),
476        }
477    }
478}
479
480impl<'de> Deserialize<'de> for AggregationOp {
481    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
482    where
483        D: Deserializer<'de>,
484    {
485        deserializer.deserialize_str(AggregationOpVisitor {})
486    }
487}
488
/// Aggregations are applied to a query.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct Aggregation {
    /// The alias for the aggregation, if any.
    pub alias: Option<String>,
    /// The operation of the aggregation.
    pub op: AggregationOp,
    /// The field to aggregate on.
    pub field: String,
    /// Argument to the aggregation.
    /// Only valid for `OpCountDistinctIf`, `OpTopk`, `OpPercentiles` and
    /// `OpHistogram` aggregations; omitted from the payload when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub argument: Option<JsonValue>,
}
504
/// Supported filter operations. Supported types listed behind each operation.
///
/// Serialized as lower-case names except for the comparison operators,
/// which use their symbolic form (`==`, `!=`, `>`, `>=`, `<`, `<=`).
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[non_exhaustive]
#[serde(rename_all = "lowercase")]
enum FilterOp {
    /// Logical AND
    And,
    /// Logical OR
    Or,
    /// Logical NOT
    Not,

    // Works for strings and numbers.
    /// equality (string, number)
    #[serde(rename = "==")]
    Equal,
    /// negated equality (string, number)
    #[serde(rename = "!=")]
    NotEqual,
    /// existence (string, number)
    Exists,
    /// negated existence (string, number)
    NotExists,

    // Only works for numbers.
    /// greater than (number)
    #[serde(rename = ">")]
    GreaterThan,
    /// greater than or equal (number)
    #[serde(rename = ">=")]
    GreaterThanEqual,
    /// less than (number)
    #[serde(rename = "<")]
    LessThan,
    /// less than or equal (number)
    #[serde(rename = "<=")]
    LessThanEqual,

    // Only works for strings.
    /// starts with (string)
    StartsWith,
    /// negated starts with (string)
    NotStartsWith,
    /// ends with (string)
    EndsWith,
    /// negated ends with (string)
    NotEndsWith,
    /// regular expression (string)
    Regexp,
    /// negated regular expression (string)
    NotRegexp,

    // Works for strings and arrays.
    /// contains (string, array)
    Contains,
    /// negated contains (string, array)
    NotContains,
}
563
/// A filter is applied to a query.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
struct Filter {
    /// The operation of the filter.
    pub op: FilterOp,
    /// The field to filter on.
    pub field: String,
    /// The value to filter against.
    pub value: JsonValue,
    /// If the filter should be case insensitive.
    /// Defaults to `false` when absent from the payload.
    #[serde(default)]
    pub case_insensitive: bool,
    /// Child filters that are applied to the filter.
    /// A JSON `null` deserializes to an empty vector.
    #[serde(default, deserialize_with = "deserialize_null_default")]
    pub children: Vec<Filter>,
}
581
582impl Default for Filter {
583    fn default() -> Self {
584        Filter {
585            op: FilterOp::Equal,
586            field: String::new(),
587            value: JsonValue::Null,
588            case_insensitive: false,
589            children: vec![],
590        }
591    }
592}
593
/// A `VirtualField` is not part of a dataset and its value is derived from an
/// expression. Aggregations, filters and orders can reference this field like
/// any other field.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct VirtualField {
    /// Alias the virtual field is referenced by.
    pub alias: String,
    /// Expression which specifies the virtual field's value.
    pub expr: String,
}
604
605mod table;
/// The query result. It embeds the APL request in the result it created.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct QueryResult {
    /// The status of the query result.
    pub status: QueryStatus,

    /// The tables that were queried.
    pub tables: Vec<table::Table>,
    /// The ID of the query that generated this result when it was saved on the
    /// server. This is only set when the query was sent with the save
    /// option specified.
    // NOTE(review): `serde(skip)` means this is not part of the JSON body —
    // presumably it is populated from elsewhere in the client; confirm.
    #[serde(skip)]
    pub saved_query_id: Option<String>,
}
621
/// The status of a query result.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct QueryStatus {
    /// The duration it took the query to execute.
    // NOTE(review): the unit is not evident from this file — confirm against
    // the API documentation.
    pub elapsed_time: u64,
    /// The amount of blocks that have been examined by the query.
    pub blocks_examined: u64,
    /// The amount of rows that have been examined by the query.
    pub rows_examined: u64,
    /// The amount of rows that matched the query.
    pub rows_matched: u64,
    /// The amount of groups returned by the query.
    pub num_groups: u32,
    /// True if the query result is a partial result.
    pub is_partial: bool,
    /// Populated when `is_partial` is true; must be passed to the next query
    /// request to retrieve the next result set.
    pub continuation_token: Option<String>,
    /// True if the query result is estimated.
    /// Defaults to `false` when absent from the payload.
    #[serde(default)]
    pub is_estimate: bool,
    /// The status of the cache.
    pub cache_status: CacheStatus,
    /// The timestamp of the oldest block examined.
    pub min_block_time: DateTime<Utc>,
    /// The timestamp of the newest block examined.
    pub max_block_time: DateTime<Utc>,
    /// Messages associated with the query.
    /// A JSON `null` deserializes to an empty vector.
    #[serde(default, deserialize_with = "deserialize_null_default")]
    pub messages: Vec<QueryMessage>,
    /// Row id of the newest row, as seen server side.
    pub max_cursor: Option<String>,
    /// Row id of the oldest row, as seen server side.
    pub min_cursor: Option<String>,
}
658
bitflags! {
    /// The cache status of the query.
    ///
    /// A bit set, so multiple cache layers can be reported at once.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
    pub struct CacheStatus: u32 {
        /// Cache miss.
        const Miss = 1;
        /// Filtered rows.
        const Materialized = 2;
        /// Aggregated and grouped records.
        const Results = 4;
        /// WAL is cached.
        const WalCached = 8;
    }
}
// Serialize/deserialize the flags as their underlying `u32` bits.
impl_serde_for_bitflags!(CacheStatus);
674
/// A message that is returned in the status of a query.
#[derive(Serialize, Deserialize, Debug)]
pub struct QueryMessage {
    // The severity of the message.
    priority: QueryMessagePriority,
    // How many times this message occurred.
    count: u32,
    // The machine-readable code of the message.
    code: QueryMessageCode,
    // Optional human-readable message text.
    text: Option<String>,
}
683
/// The priority of a query message.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Copy)]
#[non_exhaustive]
#[serde(rename_all = "camelCase")]
pub enum QueryMessagePriority {
    /// Trace message priority.
    Trace,
    /// Debug message priority.
    Debug,
    /// Info message priority.
    Info,
    /// Warn message priority.
    Warn,
    /// Error message priority.
    Error,
    /// Fatal message priority.
    Fatal,
}
702
/// The code of a message that is returned in the status of a query.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Copy)]
#[non_exhaustive]
#[serde(rename_all = "snake_case")]
pub enum QueryMessageCode {
    /// Failed to finalize a virtual field.
    VirtualFieldFinalizeError,
    /// Missing column in the dataset.
    MissingColumn,
    /// Default limit warning.
    DefaultLimitWarning,
    /// License limit for query warning.
    LicenseLimitForQueryWarning,
    /// Any code this client does not recognize, so newly introduced
    /// server codes do not break deserialization.
    #[serde(other)]
    Unknown,
}
720
/// An event that matched a query and is thus part of the result set.
#[derive(Serialize, Deserialize, Debug)]
pub struct Entry {
    /// The time the event occurred. Matches `sys_time` if not specified during
    /// ingestion.
    #[serde(rename = "_time")]
    pub time: DateTime<Utc>,
    /// The time the event was recorded on the server.
    #[serde(rename = "_sysTime")]
    pub sys_time: DateTime<Utc>,
    /// The unique ID of the event row.
    #[serde(rename = "_rowId")]
    pub row_id: String,
    /// Contains the raw data of the event (with filters and aggregations
    /// applied).
    pub data: HashMap<String, JsonValue>,
}
738
/// A queried time series.
#[derive(Serialize, Deserialize, Debug)]
pub struct Timeseries {
    /// The intervals that build a time series.
    pub series: Vec<Interval>,
    /// The totals of the time series.
    pub totals: Vec<EntryGroup>,
}
747
/// The interval of queried time series.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Interval {
    /// The start time of the interval.
    pub start_time: DateTime<Utc>,
    /// The end time of the interval.
    pub end_time: DateTime<Utc>,
    /// The groups of the interval.
    /// A JSON `null` deserializes to an empty vector.
    #[serde(default, deserialize_with = "deserialize_null_default")]
    pub groups: Vec<EntryGroup>,
}
760
/// A group of queried events.
#[derive(Serialize, Deserialize, Debug)]
pub struct EntryGroup {
    /// The unique ID of the group.
    pub id: u64,
    /// The data of the group.
    pub group: HashMap<String, JsonValue>,
    /// The aggregations of the group.
    pub aggregations: Vec<EntryGroupAgg>,
}
771
/// An aggregation which is part of a group of queried events.
#[derive(Serialize, Deserialize, Debug)]
pub struct EntryGroupAgg {
    /// The alias of the aggregation (wire name: `op`).
    #[serde(rename = "op")]
    pub alias: String,
    /// The value of the aggregation.
    pub value: JsonValue,
}
781
#[cfg(test)]
mod test {
    use super::*;

    /// Asserts that `value` serializes to exactly the JSON string `json`.
    fn assert_serializes<T: Serialize>(value: &T, json: &str) {
        assert_eq!(serde_json::to_string(value).expect("json error"), json);
    }

    #[test]
    fn test_aggregation_op() {
        // Round-trip a representative variant through its wire name.
        let json_repr = r#""count""#;
        assert_serializes(&AggregationOp::Count, json_repr);
        assert_eq!(
            serde_json::from_str::<AggregationOp>(json_repr).expect("json error"),
            AggregationOp::Count
        );
    }

    #[test]
    fn test_filter_op() {
        // Covers both a lower-cased name and a symbolic rename.
        for (op, json_repr) in [(FilterOp::And, r#""and""#), (FilterOp::Equal, r#""==""#)] {
            assert_serializes(&op, json_repr);
            assert_eq!(
                serde_json::from_str::<FilterOp>(json_repr).expect("json error"),
                op
            );
        }
    }

    #[test]
    fn test_kind_false() {
        // `nocache` is skipped when false.
        assert_serializes(
            &QueryParams {
                no_cache: false,
                save: None,
                format: AplResultFormat::Tabular,
            },
            r#"{"format":"tabular"}"#,
        );

        // ... and emitted when true.
        assert_serializes(
            &QueryParams {
                no_cache: true,
                save: None,
                format: AplResultFormat::Tabular,
            },
            r#"{"nocache":true,"format":"tabular"}"#,
        );

        // `save` is emitted under its wire name `saveAsKind` when set.
        assert_serializes(
            &QueryParams {
                no_cache: false,
                save: Some(QueryKind::Apl),
                format: AplResultFormat::Tabular,
            },
            r#"{"saveAsKind":"apl","format":"tabular"}"#,
        );
    }
}