// Auto-generated rustdoc search index for the `test` crate, consumed by
// initSearch() in rustdoc's main.js. Do not edit by hand — regenerate with
// rustdoc. Each `items` entry appears to be
//   [itemTypeCode, name, parentPath, docSummary, pathIndex, signatureOrNull]
// and `paths` resolves the numeric indices used by fields/methods/variants
// (NOTE(review): format inferred from the data itself — confirm against the
// rustdoc version that produced it).
//
// Fixes applied relative to the broken original: the inner quotes in
// "noise range" (insert_metric doc) are now escaped, and string/token spans
// that had been split by raw newlines (including the `null` token) are
// rejoined so the file parses as valid JavaScript.
var searchIndex = {};
searchIndex["test"] = {"doc":"Support code for rustc's built in unit-test and micro-benchmarking\nframework.","items":[
[3,"Bencher","test","Manager of the benchmarking runs.",null,null],[12,"bytes","","",0,null],
[3,"TestDesc","","",null,null],[12,"name","","",1,null],[12,"ignore","","",1,null],[12,"should_panic","","",1,null],
[3,"TestDescAndFn","","",null,null],[12,"desc","","",2,null],[12,"testfn","","",2,null],
[3,"Metric","","",null,null],[3,"MetricMap","","",null,null],
[3,"TestOpts","","",null,null],[12,"filter","","",3,null],[12,"run_ignored","","",3,null],[12,"run_tests","","",3,null],[12,"bench_benchmarks","","",3,null],[12,"logfile","","",3,null],[12,"nocapture","","",3,null],[12,"color","","",3,null],[12,"verbose","","",3,null],
[3,"BenchSamples","","",null,null],
[4,"TestName","","",null,null],[13,"StaticTestName","","",4,null],[13,"DynTestName","","",4,null],
[4,"TestFn","","",null,null],[13,"StaticTestFn","","",5,null],[13,"StaticBenchFn","","",5,null],[13,"StaticMetricFn","","",5,null],[13,"DynTestFn","","",5,null],[13,"DynMetricFn","","",5,null],[13,"DynBenchFn","","",5,null],
[4,"ShouldPanic","","",null,null],[13,"No","","",6,null],[13,"Yes","","",6,null],[13,"YesWithMessage","","",6,null],
[4,"ColorConfig","","",null,null],[13,"AutoColor","","",7,null],[13,"AlwaysColor","","",7,null],[13,"NeverColor","","",7,null],
[4,"TestResult","","",null,null],[13,"TrOk","","",8,null],[13,"TrFailed","","",8,null],[13,"TrIgnored","","",8,null],[13,"TrMetrics","","",8,null],[13,"TrBench","","",8,null],
[5,"test_main","","",null,null],[5,"test_main_static","","",null,null],[5,"parse_opts","","",null,null],
[5,"fmt_bench_samples","","",null,{"inputs":[{"name":"benchsamples"}],"output":{"name":"string"}}],
[5,"run_tests_console","","",null,{"inputs":[{"name":"testopts"},{"name":"vec"}],"output":{"name":"result"}}],
[5,"filter_tests","","",null,{"inputs":[{"name":"testopts"},{"name":"vec"}],"output":{"name":"vec"}}],
[5,"convert_benchmarks_to_tests","","",null,{"inputs":[{"name":"vec"}],"output":{"name":"vec"}}],
[5,"run_test","","",null,{"inputs":[{"name":"testopts"},{"name":"bool"},{"name":"testdescandfn"},{"name":"sender"}],"output":null}],
[5,"black_box","","",null,{"inputs":[{"name":"t"}],"output":{"name":"t"}}],
[0,"test","","",null,null],[0,"stats","","",null,null],
[3,"Summary","test::stats","Extracted collection of all the summary statistics of a sample set.",null,null],
[12,"sum","","",9,null],[12,"min","","",9,null],[12,"max","","",9,null],[12,"mean","","",9,null],[12,"median","","",9,null],[12,"var","","",9,null],[12,"std_dev","","",9,null],[12,"std_dev_pct","","",9,null],[12,"median_abs_dev","","",9,null],[12,"median_abs_dev_pct","","",9,null],[12,"quartiles","","",9,null],[12,"iqr","","",9,null],
[5,"winsorize","","Winsorize a set of samples, replacing values above the `100-pct` percentile\nand below the `pct` percentile with those percentiles themselves. This is a\nway of minimizing the effect of outliers, at the cost of biasing the sample.\nIt differs from trimming in that it does not change the number of samples,\njust changes the values of those that are outliers.",null,null],
[8,"Stats","","Trait that provides simple descriptive statistics on a univariate set of numeric samples.",null,null],
[10,"sum","","Sum of the samples.",10,null],
[10,"min","","Minimum value of the samples.",10,null],
[10,"max","","Maximum value of the samples.",10,null],
[10,"mean","","Arithmetic mean (average) of the samples: sum divided by sample-count.",10,null],
[10,"median","","Median of the samples: value separating the lower half of the samples from the higher half.\nEqual to `self.percentile(50.0)`.",10,null],
[10,"var","","Variance of the samples: bias-corrected mean of the squares of the differences of each\nsample from the sample mean. Note that this calculates the _sample variance_ rather than the\npopulation variance, which is assumed to be unknown. It therefore corrects the `(n-1)/n`\nbias that would appear if we calculated a population variance, by dividing by `(n-1)` rather\nthan `n`.",10,null],
[10,"std_dev","","Standard deviation: the square root of the sample variance.",10,null],
[10,"std_dev_pct","","Standard deviation as a percent of the mean value. See `std_dev` and `mean`.",10,null],
[10,"median_abs_dev","","Scaled median of the absolute deviations of each sample from the sample median. This is a\nrobust (distribution-agnostic) estimator of sample variability. Use this in preference to\n`std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled\nby the constant `1.4826` to allow its use as a consistent estimator for the standard\ndeviation.",10,null],
[10,"median_abs_dev_pct","","Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`.",10,null],
[10,"percentile","","Percentile: the value below which `pct` percent of the values in `self` fall. For example,\npercentile(95.0) will return the value `v` such that 95% of the samples `s` in `self`\nsatisfy `s <= v`.",10,null],
[10,"quartiles","","Quartiles of the sample: three values that divide the sample into four equal groups, each\nwith 1/4 of the data. The middle value is the median. See `median` and `percentile`. This\nfunction may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but\nis otherwise equivalent.",10,null],
[10,"iqr","","Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th\npercentile (3rd quartile). See `quartiles`.",10,null],
[11,"eq","","",9,null],[11,"ne","","",9,null],[11,"clone","","",9,null],
[11,"new","","Construct a new summary of a sample set.",9,null],
[0,"bench","test","",null,null],
[5,"benchmark","test::bench","",null,{"inputs":[{"name":"f"}],"output":{"name":"benchsamples"}}],
[5,"run_once","","",null,{"inputs":[{"name":"f"}],"output":null}],
[6,"OptRes","test","Result of parsing the options.",null,null],
[6,"MonitorMsg","","",null,null],
[8,"TDynBenchFn","","Represents a benchmark function.",null,null],
[10,"run","","",11,null],
[11,"fmt","","",4,null],[11,"hash","","",4,null],[11,"eq","","",4,null],[11,"ne","","",4,null],[11,"clone","","",4,null],[11,"fmt","","",4,null],
[11,"dyn_test_fn","","",5,{"inputs":[{"name":"f"}],"output":{"name":"self"}}],
[11,"dyn_metric_fn","","",5,{"inputs":[{"name":"f"}],"output":{"name":"self"}}],
[11,"fmt","","",5,null],
[11,"clone","","",0,null],
[11,"hash","","",6,null],[11,"eq","","",6,null],[11,"ne","","",6,null],[11,"fmt","","",6,null],[11,"clone","","",6,null],
[11,"hash","","",1,null],[11,"eq","","",1,null],[11,"ne","","",1,null],[11,"fmt","","",1,null],[11,"clone","","",1,null],
[11,"fmt","","",2,null],
[11,"fmt","","",12,null],[11,"eq","","",12,null],[11,"ne","","",12,null],
[11,"decode","","",12,{"inputs":[{"name":"__d"}],"output":{"name":"result"}}],
[11,"encode","","",12,null],[11,"clone","","",12,null],
[11,"new","","",12,{"inputs":[{"name":"f64"},{"name":"f64"}],"output":{"name":"metric"}}],
[11,"eq","","",13,null],[11,"ne","","",13,null],[11,"clone","","",13,null],
[11,"clone","","",7,null],
[11,"eq","","",14,null],[11,"ne","","",14,null],[11,"clone","","",14,null],
[11,"eq","","",8,null],[11,"ne","","",8,null],[11,"clone","","",8,null],
[11,"new","","",13,{"inputs":[],"output":{"name":"metricmap"}}],
[11,"insert_metric","","Insert a named `value` (+/- `noise`) metric into the map. The value\nmust be non-negative. The `noise` indicates the uncertainty of the\nmetric, which doubles as the \"noise range\" of acceptable\npairwise-regressions on this named value, when comparing from one\nmetric to the next using `compare_to_old`.",13,null],
[11,"fmt_metrics","","",13,null],
[11,"iter","","Callback for benchmark functions to run in their body.",0,null],
[11,"ns_elapsed","","",0,null],[11,"ns_per_iter","","",0,null],[11,"bench_n","","",0,null],[11,"auto_bench","","",0,null]
],"paths":[[3,"Bencher"],[3,"TestDesc"],[3,"TestDescAndFn"],[3,"TestOpts"],[4,"TestName"],[4,"TestFn"],[4,"ShouldPanic"],[4,"ColorConfig"],[4,"TestResult"],[3,"Summary"],[8,"Stats"],[8,"TDynBenchFn"],[3,"Metric"],[3,"MetricMap"],[3,"BenchSamples"]]};
initSearch(searchIndex);