// slog_extlog/slog_test.rs

//!
//! Copyright 2017 Metaswitch Networks
//!
//! Helper functions for use when testing slog logs.
//!
//! This module is a grab-bag of tools that have been found useful when testing slog logs.
//! A typical test will create a new `iobuffer::IoBuffer`, pass it to `new_test_logger` to
//! construct a logger, and pass that logger to the system under test. It will then exercise
//! the system. Finally, it will use `logs_in_range` to extract the logs it is interested in
//! in a canonical order, and test that they are as expected using standard Rust tools.
//!
//! # Example
//! ```
//! use slog::debug;
//! use slog_extlog::slog_test;
//!
//! // Setup code
//! let mut data = iobuffer::IoBuffer::new();
//! let logger = slog_test::new_test_logger(data.clone());
//!
//! // Application code
//! debug!(logger, "Something happened to it";
//!        "subject" => "something",
//!        "verb"    => "happened",
//!        "object"  => "it");
//!
//! // Test code - parse all logs and check their contents.
//! let logs = slog_test::read_json_values(&mut data);
//! slog_test::assert_json_matches(&logs[0], &serde_json::json!({ "subject": "something", "object": "it" }));
//! assert!(logs[0]["msg"].as_str().unwrap().contains("to it"));
//!
//! // More application code
//! debug!(logger, "Another log"; "log_id" => "ABC123");
//! debug!(logger, "Imposter"; "log_id" => "XYZ123");
//!
//! // Alternate test code - parse selected logs and check their contents.
//! let abc_logs = slog_test::logs_in_range("ABC", "ABD", &mut data);
//! assert_eq!(abc_logs.len(), 1);
//! assert_eq!(abc_logs[0]["msg"].as_str().unwrap(), "Another log".to_string());
//! ```
//!
//! # Statistics testing
//!
//! For verifying statistics, the `create_logger_buffer` and `check_expected_stats` methods
//! are useful for creating a `slog_extlog::StatisticsLogger` and then verifying that statistics
//! are generated as expected.
#![allow(clippy::float_cmp)]
pub use super::stats::*;

use slog::o;
#[allow(unused_imports)] // we need this trait for lines()
use std::io::BufRead;
use std::sync::Mutex;

/// Buffer containing log data, shared between the test logger and the test code.
pub type Buffer = iobuffer::IoBuffer;
57
58/// Create a new test logger suitable for use with `read_json_values`.
59pub fn new_test_logger(data: Buffer) -> slog::Logger {
60    slog::Logger::root(
61        slog::Fuse::new(Mutex::new(slog_json::Json::default(data))),
62        o!(),
63    )
64}
65
66/// Read all the newline-delimited JSON objects from the given stream,
67/// panicking if there is an IO error or a JSON parse error.
68/// No attempt is made to avoid reading partial lines from the stream.
69pub fn read_json_values(data: &mut Buffer) -> Vec<serde_json::Value> {
70    let iter = data.lines().map(move |line| {
71        serde_json::from_slice::<serde_json::Value>(&line).expect("JSON parse error")
72    });
73    iter.collect()
74}
75
76/// Test whether the given log lies in the given range: between
77/// `min_id` (inclusive) and `max_id` (exclusive).
78pub fn log_in_range(min_id: &str, max_id: &str, log: &serde_json::Value) -> bool {
79    match log["log_id"].as_str() {
80        Some(log_id) => log_id >= min_id && log_id < max_id,
81        None => false,
82    }
83}
84
85/// Collect all logs of the indicated type (see `log_in_range`) and sort them in
86/// ascending order of log_id.
87pub fn logs_in_range(min_id: &str, max_id: &str, data: &mut Buffer) -> Vec<serde_json::Value> {
88    let mut v = read_json_values(data)
89        .into_iter()
90        .filter(|log| log_in_range(min_id, max_id, log))
91        .collect::<Vec<_>>(); // LCOV_EXCL_LINE kcov bug?
92    v.sort_by_key(|log| log["log_id"].as_str().map(ToString::to_string));
93    v
94}
95
96/// Assert that every item contained in `expected` also appears in `actual`.
97/// Additional values in `actual` are ignored.
98pub fn assert_json_matches(actual: &serde_json::Value, expected: &serde_json::Value) {
99    fn check(
100        actual: &serde_json::Value,
101        expected: &serde_json::Value,
102        left: &serde_json::Value,
103        right: &serde_json::Value,
104        path: &str,
105    ) {
106        if left.is_object() && right.is_object() {
107            for (key, value) in right.as_object().unwrap().iter() {
108                let path = format!("{}.{}", path, key);
109                check(actual, expected, &left[key], value, &path);
110            }
111        } else if left.is_array() && right.is_array() {
112            for (index, value) in right.as_array().unwrap().iter().enumerate() {
113                let path = format!("{}.{}", path, index);
114                check(actual, expected, &left[index], value, &path);
115            }
116        } else {
117            assert!(
118                left == right,
119                "Mismatch at {}:\nexpected:\n{}\nbut found:\n{}",
120                path,
121                expected,
122                actual
123            );
124        }
125    }
126    check(actual, expected, actual, expected, "");
127}
128
/// A default logging interval for tests, short so UTs run faster.
///
/// Passed to `fuse_with_log_interval` when the `interval_logging` feature is
/// enabled; units are whatever that API expects — TODO confirm (seconds?).
pub static TEST_LOG_INTERVAL: u64 = 5;
131
/// Build a `StatisticsLogger` tracking `stats`, with periodic stats logging
/// every `TEST_LOG_INTERVAL` (only when the `interval_logging` feature is on).
#[cfg(feature = "interval_logging")]
fn new_stats_logger(stats: StatDefinitions, logger: slog::Logger) -> StatisticsLogger {
    StatsLoggerBuilder::default()
        .with_stats(vec![stats])
        .fuse_with_log_interval::<DefaultStatisticsLogFormatter>(TEST_LOG_INTERVAL, logger)
}
139
140#[cfg(not(feature = "interval_logging"))]
141fn new_stats_logger(stats: StatDefinitions, logger: slog::Logger) -> StatisticsLogger {
142    let builder = StatsLoggerBuilder::default();
143    builder.with_stats(vec![stats]).fuse(logger)
144}
145
146/// Common setup function.
147///
148/// Creates a logger using the provided statistics and buffer so we can easily
149/// view the generated logs.
150pub fn create_logger_buffer(stats: StatDefinitions) -> (StatisticsLogger, Buffer) {
151    let data = iobuffer::IoBuffer::new();
152    let logger = new_test_logger(data.clone());
153    let stats_logger = new_stats_logger(stats, logger);
154    (stats_logger, data)
155}
156
/// A single expected statistic, for checking generated stats logs with
/// `check_expected_stats`.
pub struct ExpectedStat {
    /// Stat name E.g. "test_bucket_counter_grouped_freq"
    pub stat_name: &'static str,
    /// Stat tag E.g. Some("name=name,error=3,bucket=-5"); `None` matches any tags.
    pub tag: Option<&'static str>,
    /// Value of this stat E.g. 0f64
    pub value: f64,
    /// Type of metric for this stat E.g. "bucket_counter"
    pub metric_type: &'static str,
}
168
169/// Asserts that a set of logs (retrieved using `logs_in_range)` is exactly equal to an
170/// expected set of stats.
171///
172/// Particularly useful for grouped stats.
173pub fn check_expected_stats(logs: &[serde_json::Value], mut expected_stats: Vec<ExpectedStat>) {
174    for log in logs {
175        let mut matched = None;
176        for (id, exp) in expected_stats.iter().enumerate() {
177            if log["name"] == exp.stat_name
178                && (exp.tag.is_none() || log["tags"] == exp.tag.unwrap())
179            {
180                assert_eq!(logs[0]["metric_type"], exp.metric_type);
181                assert_eq!(log["value"].as_f64(), Some(exp.value));
182                matched = Some(id);
183                break;
184            }
185        }
186        assert!(matched.is_some());
187        expected_stats.remove(matched.unwrap());
188    }
189
190    assert_eq!(expected_stats.len(), 0);
191}
192
// LCOV_EXCL_START Don't test derives
/// ExpectedStatSnapshot helper.
///
/// Describes one stat snapshot that `check_expected_stat_snapshots` should find.
///
/// E.g.
/// ```ignore
/// ExpectedStatSnapshot {
///     name: "test_group_bucket_counter",
///     description: "Test cumulative bucket counter with groups",
///     stat_type: BucketCounter,
///     values: vec![
///         ExpectedStatSnapshotValue {
///             group_values: vec!["one".to_string(), "two".to_string()],
///             bucket_limit: Some(BucketLimit::Num(-8)),
///             value: 0f64,
///         },
///         ExpectedStatSnapshotValue {
///             group_values: vec!["one".to_string(), "two".to_string()],
///             bucket_limit: Some(BucketLimit::Num(0)),
///             value: 0f64,
///         },
///         ExpectedStatSnapshotValue {
///             group_values: vec!["one".to_string(), "two".to_string()],
///             bucket_limit: Some(BucketLimit::Unbounded),
///             value: 3f64,
///         },
///         ExpectedStatSnapshotValue {
///             group_values: vec!["three".to_string(), "four".to_string()],
///             bucket_limit: Some(BucketLimit::Num(-8)),
///             value: 4f64,
///         },
///         ExpectedStatSnapshotValue {
///             group_values: vec!["three".to_string(), "four".to_string()],
///             bucket_limit: Some(BucketLimit::Num(0)),
///             value: 4f64,
///         },
///         ExpectedStatSnapshotValue {
///             group_values: vec!["three".to_string(), "four".to_string()],
///             bucket_limit: Some(BucketLimit::Unbounded),
///             value: 4f64,
///         },
///     ],
///     buckets: Some(Buckets::new(BucketMethod::CumulFreq, "bucket", &[-8, 0])),
/// }
/// ```
#[derive(Debug)]
pub struct ExpectedStatSnapshot {
    /// Name of the stat
    pub name: &'static str,
    /// Description of the stat
    pub description: &'static str,
    /// Type of the stat
    pub stat_type: StatType,
    /// A vec of the SnapShot values expected for this snapshot.
    pub values: Vec<ExpectedStatSnapshotValue>,
    /// The buckets for the stat.
    pub buckets: Option<Buckets>,
}
247
/// ExpectedStatSnapshotValue helper.
///
/// A single expected value within an `ExpectedStatSnapshot`.
#[derive(Debug)]
pub struct ExpectedStatSnapshotValue {
    /// The group values (tags) by which this value for the snapshot is grouped.
    pub group_values: Vec<String>,
    /// The value of this snapshot.
    pub value: f64,
    /// The upper limit of the bucket which is represented by this snapshot.
    pub bucket_limit: Option<BucketLimit>,
}
258// LCOV_EXCL_STOP
259
/// Check that a set of stat snapshots are as expected.
///
/// Each expected stat must be found in `stats` by name, with matching type,
/// description, buckets and values; the two slices must also have the same
/// length, so the match is exact (no extra or missing snapshots).
pub fn check_expected_stat_snapshots(
    stats: &[StatSnapshot],
    expected_stats: &[ExpectedStatSnapshot],
) {
    for stat in expected_stats {
        // Locate the snapshot with this expected name.
        let found_stat = stats.iter().find(|s| s.definition.name() == stat.name);

        assert!(found_stat.is_some(), "Failed to find stat {}", stat.name);
        let found_stat = found_stat.unwrap();

        // The snapshot's definition must agree with the expectation.
        assert_eq!(found_stat.definition.stype(), stat.stat_type);
        assert_eq!(found_stat.definition.description(), stat.description);
        assert_eq!(found_stat.definition.buckets(), stat.buckets);

        match found_stat.values {
            // Counters and gauges: values are matched on group values alone.
            StatSnapshotValues::Counter(ref vals) | StatSnapshotValues::Gauge(ref vals) => {
                for value in &stat.values {
                    let found_value = vals
                        .iter()
                        .find(|val| val.group_values == value.group_values);

                    assert!(
                        found_value.is_some(),
                        "Failed to find value with groups {:?} and bucket_limit {:?} for stat {}",
                        value.group_values, // LCOV_EXCL_LINE
                        value.bucket_limit, // LCOV_EXCL_LINE
                        stat.name           // LCOV_EXCL_LINE
                    );
                    let found_value = found_value.unwrap();
                    assert_eq!(found_value.group_values, value.group_values);
                    assert_eq!(found_value.value, value.value);
                }
            }

            // Bucket counters: values are matched on group values plus bucket limit.
            StatSnapshotValues::BucketCounter(ref buckets, ref vals) => {
                // The snapshot's own buckets must also agree with the expectation.
                assert_eq!(Some(buckets), stat.buckets.as_ref());
                for value in &stat.values {
                    let found_value = vals.iter().find(|(val, bucket)| {
                        val.group_values == value.group_values
                            && Some(bucket) == value.bucket_limit.as_ref()
                    });

                    assert!(
                        found_value.is_some(),
                        "Failed to find value with groups {:?} and bucket_limit {:?} for stat {}",
                        value.group_values, // LCOV_EXCL_LINE
                        value.bucket_limit, // LCOV_EXCL_LINE
                        stat.name           // LCOV_EXCL_LINE
                    );
                    let (found_value, _) = found_value.unwrap();
                    assert_eq!(found_value.group_values, value.group_values);
                    assert_eq!(found_value.value, value.value);
                }
            }
        }
    }

    // Same length + every expected stat found => exact one-to-one match.
    assert_eq!(stats.len(), expected_stats.len());
}
320
#[cfg(test)]
mod tests {
    use super::*;
    use slog::debug;
    use std::sync::mpsc;
    use std::thread;

    /// Ensure that partial writes don't cause JSON parsing errors.
    #[test]
    fn test_partial_write() {
        // Set up the logger.
        let mut data = iobuffer::IoBuffer::new();
        let logger = new_test_logger(data.clone());

        // Channels used to synchronise with, and later stop, the writer thread.
        let (started_send, started_recv) = mpsc::channel();
        let (done_send, done_recv) = mpsc::channel();

        // In a separate thread, repeatedly write JSON values until
        // told to stop. There are lots of fields, and serde `write()`s each
        // value separately, so there's plenty of opportunity for reading an
        // incomplete record.
        let _ = thread::spawn(move || {
            started_send.send(()).unwrap();
            while done_recv.try_recv().is_err() {
                debug!(logger, "Some data";
                       "alfa" => "alpha",
                       "bravo" => "beta",
                       "charlie" => "gamma",
                       "delta" => "delta",
                       "echo" => "epsilon");
            }
        });

        // Wait until the thread has started.
        started_recv.recv().unwrap();

        // Now try to read some values. This should not fail with a parse
        // error.
        let _ = read_json_values(&mut data);

        // Tell the daemon thread to stop so as not to leak CPU/memory.
        done_send.send(()).unwrap();
    }
}
364}