//! A testing library utilizing golden tests.
//!
//! ### Why golden tests?
//!
//! Golden tests allow you to specify the expected output of
//! some command within a file and automatically verify
//! that the output doesn't change. If it does, goldentests
//! presents an error-diff of the expected and actual
//! output. This way, whenever the output of something changes,
//! a human can review the change and decide whether it should be kept
//! or whether it is a bug and should be reverted.
//!
//! ### What are golden tests useful for?
//!
//! Golden tests are especially useful for applications that
//! take a file as input and produce output of some kind. For
//! example, compilers and config parsers (well, parsers in general)
//! are two such applications that can benefit from automated golden
//! tests. In the case of a config parser, you could
//! provide many config examples as tests and ensure that your
//! parser reads each file with the expected stdout/stderr
//! output and exit code.
//!
//! ### How do I get started?
//!
//! Include a test in your program that looks something like this:
//!
//! ```rust
//! use goldentests::{ TestConfig, TestResult };
//! 
//! #[test]
//! fn run_goldentests() -> TestResult<()> {
//!     // Replace "// " with your language's/parser's comment syntax.
//!     // This tells golden tests to embed its keywords in lines beginning with "// "
//!     let config = TestConfig::new("target/debug/my-binary", "directory/with/tests", "// ")?;
//!     config.run_tests()
//! }
//! ```
//!
//! Now you can start adding tests to `directory/with/tests`; each test should
//! be automatically found and run by goldentests whenever you run `cargo test`.
//! Here's a quick example of a test file that uses all of goldentests' features:
//!
//! ```python
//! import sys
//! 
//! print("hello!\nfriend!")
//! print("error!", file=sys.stderr)
//! sys.exit(3)
//! 
//! # Assuming 'python' is the command passed to TestConfig::new:
//! # args: -B
//! # expected exit status: 3
//! # expected stdout:
//! # hello!
//! # friend!
//! 
//! # expected stderr: error!
//! ```
//!
//! Check out the documentation in `TestConfig` for optional configuration
//! including verbose output.
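//!
//! For instance, the keyword prefixes used when parsing test files
//! (`test_args_prefix`, `test_stdout_prefix`, `test_stderr_prefix`, and
//! `test_exit_status_prefix`) and the `verbose` flag all live on
//! `TestConfig`. A minimal sketch of enabling verbose output, assuming
//! these fields are publicly writable:
//!
//! ```rust,ignore
//! use goldentests::{ TestConfig, TestResult };
//!
//! fn run_verbosely() -> TestResult<()> {
//!     let mut config = TestConfig::new("target/debug/my-binary", "directory/with/tests", "// ")?;
//!     // Assumes `verbose` is a public field on TestConfig
//!     config.verbose = true;
//!     config.run_tests()
//! }
//! ```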

pub mod config;
pub mod error;
mod diff_printer;

pub use config::TestConfig;
pub use error::TestError;
use diff_printer::DiffPrinter;

use colored::Colorize;
use similar::TextDiff;
use shlex;

use std::fs::File;
use std::path::{ Path, PathBuf };
use std::io::Read;
use std::process::{ Command, Output };

pub type TestResult<T> = Result<T, error::TestError>;

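/// A single parsed test file: its path plus the command-line arguments,
/// expected stdout/stderr, and expected exit status embedded in its comments.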
struct Test {
    path: PathBuf,
    command_line_args: String,
    expected_stdout: String,
    expected_stderr: String,
    expected_exit_status: Option<i32>,
}

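/// Tracks whether the parser is currently inside a multi-line
/// "expected stdout" or "expected stderr" block.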
#[derive(PartialEq)]
enum TestParseState {
    Neutral,
    ReadingExpectedStdout,
    ReadingExpectedStderr,
}

/// Recursively collect every file under the given directory.
/// Expects that the given directory is an existing path.
fn find_tests(directory: &Path) -> TestResult<Vec<PathBuf>> {
    let mut tests = vec![];

    for entry in std::fs::read_dir(directory).map_err(TestError::IoError)? {
        let entry = entry.map_err(TestError::IoError)?;
        let path = entry.path();

        if path.is_dir() {
            tests.append(&mut find_tests(&path)?);
        } else {
            tests.push(path);
        }
    }

    Ok(tests)
}

fn strip_prefix<'a>(s: &'a str, prefix: &str) -> &'a str {
    // Like str::strip_prefix, but returns the original string unchanged
    // when the prefix is absent.
    s.strip_prefix(prefix).unwrap_or(s)
}

fn parse_test(test_path: &Path, config: &TestConfig) -> TestResult<Test> {
    let path = test_path.to_path_buf();
    let mut command_line_args = String::new();
    let mut expected_stdout = String::new();
    let mut expected_stderr = String::new();
    let mut expected_exit_status = None;

    let mut file = File::open(test_path).map_err(TestError::IoError)?;
    let mut contents = String::new();
    file.read_to_string(&mut contents).map_err(TestError::IoError)?;

    let mut state = TestParseState::Neutral;
    for line in contents.lines() {
        if line.starts_with(&config.test_line_prefix) {
            // If we're currently reading stdout or stderr, append the line to the expected output
            if state == TestParseState::ReadingExpectedStdout {
                expected_stdout += strip_prefix(line, &config.test_line_prefix);
                expected_stdout += "\n";
            } else if state == TestParseState::ReadingExpectedStderr {
                expected_stderr += strip_prefix(line, &config.test_line_prefix);
                expected_stderr += "\n";

            // Otherwise, look to see if the line begins with a keyword and if so change state
            // (stdout/stderr) or parse an argument to the keyword (args/exit status).

            // args:
            } else if line.starts_with(&config.test_args_prefix) {
                command_line_args = strip_prefix(line, &config.test_args_prefix).to_string();

            // expected stdout:
            } else if line.starts_with(&config.test_stdout_prefix) {
                state = TestParseState::ReadingExpectedStdout;
                // Append the remainder of the line to the expected stdout.
                // Both expected_stdout and expected_stderr are trimmed before comparison,
                // so the extra newline appended when the rest of the line is empty is harmless.
                expected_stdout += &(strip_prefix(line, &config.test_stdout_prefix).to_string() + "\n");

            // expected stderr:
            } else if line.starts_with(&config.test_stderr_prefix) {
                state = TestParseState::ReadingExpectedStderr;
                expected_stderr += &(strip_prefix(line, &config.test_stderr_prefix).to_string() + "\n");

            // expected exit status:
            } else if line.starts_with(&config.test_exit_status_prefix) {
                let status = strip_prefix(line, &config.test_exit_status_prefix).trim();
                expected_exit_status = Some(status.parse().map_err(TestError::ErrorParsingExitStatus)?);
            }
        } else {
            state = TestParseState::Neutral;
        }
    }

    // Remove \r from strings for windows compatibility. This means we
    // also can't test for any string containing "\r" unless this check
    // is improved to be more clever (e.g. only removing at the end of a line).
    let expected_stdout = expected_stdout.replace("\r", "");
    let expected_stderr = expected_stderr.replace("\r", "");

    Ok(Test { path, command_line_args, expected_stdout, expected_stderr, expected_exit_status })
}

/// Diff the given "stream" against its expected contents.
/// Returns 1 if they differ and 0 otherwise, so results can be summed into an error count.
fn check_for_differences_in_stream(name: &str, path: &Path, stream: &[u8], expected: &str) -> i8 {
    let output_string = String::from_utf8_lossy(stream).replace("\r", "");
    let output = output_string.trim();
    let expected = expected.trim();

    let differences = TextDiff::from_lines(expected, output);
    if differences.ratio() != 1.0 {
        println!("\n{}: Actual {} differs from expected {}:\n{}",
            path.to_string_lossy().bright_yellow(), name, name, DiffPrinter(differences));
        1
    } else {
        0
    }
}

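/// Check a finished process's output against all of the test's expectations
/// (exit status, stdout, and stderr). Returns true if any check failed.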
fn check_for_differences(path: &Path, output: &Output, test: &Test) -> bool {
    let mut error_count = 0;
    if let Some(expected_status) = test.expected_exit_status {
        if let Some(actual_status) = output.status.code() {
            if expected_status != actual_status {
                error_count += 1;
                println!("\n{}: Expected an exit status of {} but process returned {}",
                       path.to_string_lossy().bright_yellow(), expected_status, actual_status);
            }
        } else {
            error_count += 1;
            println!("\n{}: Expected an exit status of {} but process was terminated by signal instead",
                    path.to_string_lossy().bright_yellow(), expected_status);
        }
    }

    error_count += check_for_differences_in_stream("stdout", path, &output.stdout, &test.expected_stdout);
    error_count += check_for_differences_in_stream("stderr", path, &output.stderr, &test.expected_stderr);
    error_count != 0
}

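/// Like `print!`, but only prints when `$config.verbose` is set.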
macro_rules! print_verbose {
    ($config:expr, $($output:tt)*) => {
        if $config.verbose {
            print!($($output)*)
        }
    };
}

impl TestConfig {
    /// Recurse through all the files in `self.test_path`, parse each of them,
    /// and run the target program with the arguments specified in each file.
    pub fn run_tests(&self) -> TestResult<()> {
        let files = find_tests(&self.test_path)?;
        let tests = files.iter()
            .map(|file| parse_test(file, self))
            .collect::<TestResult<Vec<_>>>()?;

        let (mut failing_tests, mut total_tests) = (0, 0);
        for test in tests {
            print_verbose!(self, "testing {}... ", &test.path.to_string_lossy().bright_yellow());
            let mut args = vec![];

            // Avoid pushing an empty '' arg at the beginning
            let trimmed_args = test.command_line_args.trim();
            if !trimmed_args.is_empty() {
                // shlex::split returns None on malformed input, e.g. an unclosed quote
                args = shlex::split(trimmed_args)
                    .expect("malformed command line arguments in test file");
            }

            args.push(test.path.to_string_lossy().to_string());

            let output = Command::new(&self.binary_path).args(args).output().map_err(TestError::IoError)?;
            let new_error = check_for_differences(&test.path, &output, &test);

            total_tests += 1;
            if new_error {
                failing_tests += 1;
                print_verbose!(self, "{}\n", "failed\n".red());
            } else {
                print_verbose!(self, "{}\n", "ok".green());
            }
        }

        println!(
            "ran {} {} tests with {} and {}\n",
            total_tests,
            "golden".bright_yellow(),
            format!("{} passing", total_tests - failing_tests).green(),
            format!("{} failing", failing_tests).red(),
        );

        if failing_tests != 0 {
            Err(TestError::ExpectedOutputDiffers)
        } else {
            Ok(())
        }
    }
}