//! Routines for running tests.

mod find_files;
mod print;
mod test_evaluator;

use Config;
use model::*;

use self::test_evaluator::TestEvaluator;

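/// The accumulated state for a test run: the directories to search for
/// executables, and the test files to run.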
#[derive(Clone, Debug, PartialEq, Eq)]
struct Context
{
    pub exec_search_dirs: Vec<String>,
    pub test_files: Vec<TestFile>,
}

/// Runs all tests according to a given config.
///
/// Returns `Ok` if all tests pass, and `Err` otherwise.
///
/// # Parameters
///
/// * `config_fn` is a function which sets up the test config.
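///
/// # Examples
///
/// A minimal sketch of a `cargo test` entry point. This assumes the module
/// is reachable as `lit::run` and that `Config` exposes its `test_paths`
/// list directly; adjust to the real `Config` API.
///
/// ```ignore
/// #[test]
/// fn lit_tests() {
///     lit::run::tests(|config| {
///         // Hypothetical location of the test suite on disk.
///         config.test_paths.push("tests/lit".into());
///     }).expect("lit tests failed");
/// }
/// ```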
pub fn tests<F>(config_fn: F) -> Result<(), ()>
    where F: Fn(&mut Config) {
    let mut config = Config::default();
    config_fn(&mut config);

    if config.test_paths.is_empty() {
        util::abort("no test paths given to lit")
    }

    let test_paths = match find_files::with_config(&config) {
        Ok(paths) => paths,
        Err(e) => util::abort(format!("could not find files: {}", e)),
    };

    if test_paths.is_empty() {
        print::warning("could not find any tests");
        return Err(());
    }

    let mut context = test_paths.into_iter().fold(Context::new(), |context, file| {
        let test = match util::parse_test(&file) {
            Ok(test) => test,
            Err(e) => util::abort(format!("could not parse test {}: {}", file, e)),
        };
        context.test(test)
    });

    match util::crate_dir() {
        Some(dir) => context.add_search_dir(dir),
        None => print::warning("could not find tool directory"),
    }

    let results = context.run(&config);
    let erroneous_results: Vec<_> = results.iter()
        .filter(|r| r.kind.is_erroneous())
        .collect();

    for result in results.iter() {
        print::result(result, true);
    }

    if !erroneous_results.is_empty() {
        print::text("");
        print::text("Failing tests:");
        print::text("");

        for result in &erroneous_results {
            print::result(result, false);
        }
    }

    // Cargo test will continue with whatever color we leave.
    print::reset_colors();

    if erroneous_results.is_empty() { Ok(()) } else { Err(()) }
}

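/// Runs a single test file.
///
/// Empty files are skipped. Otherwise, each `RUN` invocation is evaluated
/// in order, and the first non-passing result ends evaluation of the file.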
pub fn test_file(test_file: &TestFile, config: &Config) -> TestResult {
    if test_file.is_empty() {
        return TestResult {
            path: test_file.path.clone(),
            kind: TestResultKind::Skip,
        }
    }

    for test_evaluator in create_test_evaluators(test_file) {
        let kind = test_evaluator.run(test_file, config);

        match kind {
            TestResultKind::Pass => continue,
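            // A skipped invocation stops evaluation of the remaining
            // invocations, but the file as a whole is reported as a pass.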
            TestResultKind::Skip => {
                return TestResult {
                    path: test_file.path.clone(),
                    kind: TestResultKind::Pass,
                }
            },
            _ => {
                return TestResult {
                    path: test_file.path.clone(),
                    kind,
                }
            },
        }
    }

    TestResult {
        path: test_file.path.clone(),
        kind: TestResultKind::Pass,
    }
}

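/// Creates one test evaluator for each `RUN` invocation in the test file.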
fn create_test_evaluators(test_file: &TestFile) -> Vec<TestEvaluator> {
    test_file.directives.iter().filter_map(|directive| {
        if let Command::Run(ref invocation) = directive.command {
            Some(TestEvaluator::new(invocation.clone()))
        } else {
            None
        }
    }).collect()
}

mod util
{
    use model::*;
    use super::print;
    use parse;

    use std::io::Read;
    use std;

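    /// Returns the directory containing the currently-running test
    /// executable, if it can be determined.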
    pub fn crate_dir() -> Option<String> {
        let current_exec = match std::env::current_exe() {
            Ok(e) => e,
            Err(e) => abort(
                format!("failed to get current executable path: {}", e)),
        };

        current_exec.parent().map(|p| p.to_string_lossy().into_owned())
    }

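    /// Reads a test file from disk and parses it.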
    pub fn parse_test(file_name: &str) -> Result<TestFile, String> {
        let mut text = String::new();

        if let Err(e) = open_file(file_name).read_to_string(&mut text) {
            abort(format!("could not read {}: {}", file_name, e));
        }

        parse::test_file(file_name, text.chars())
    }

    fn open_file(path: &str) -> std::fs::File {
        match std::fs::File::open(path) {
            Ok(f) => f,
            Err(e) => abort(format!("could not open {}: {}", path, e)),
        }
    }
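
    /// Prints an error message and terminates the process with a failure
    /// exit code.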
    pub fn abort<S>(msg: S) -> !
        where S: Into<String> {
        print::failure(format!("error: {}", msg.into()));

        std::process::exit(1);
    }
}

impl Context
{
    pub fn new() -> Self {
        Context {
            exec_search_dirs: Vec::new(),
            test_files: Vec::new(),
        }
    }

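    /// Adds a parsed test file to the context, builder-style.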
    pub fn test(mut self, test_file: TestFile) -> Self {
        self.test_files.push(test_file);
        self
    }

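    /// Runs all collected test files with the given config.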
    pub fn run(&self, config: &Config) -> Results {
        let test_results = self.test_files.iter().map(|test_file| {
            self::test_file(test_file, config)
        }).collect();

        Results { test_results }
    }

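    /// Adds a directory to be searched when looking up executables.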
    pub fn add_search_dir(&mut self, dir: String) {
        self.exec_search_dirs.push(dir);
    }
}