SwiftyOpenCC // Clang-Format.

This commit is contained in:
ShikiSuen 2022-04-11 16:58:42 +08:00
parent 9e5ced8055
commit 9a2caf2e44
436 changed files with 77727 additions and 63883 deletions

File diff suppressed because it is too large.

View File

@@ -4,44 +4,47 @@
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace
{
namespace py = ::pybind11;

std::vector<std::string> Initialize(const std::vector<std::string> &argv)
{
    // The `argv` pointers here become invalid when this function returns, but
    // benchmark holds the pointer to `argv[0]`. We create a static copy of it
    // so it persists, and replace the pointer below.
    static std::string executable_name(argv[0]);
    std::vector<char *> ptrs;
    ptrs.reserve(argv.size());
    for (auto &arg : argv)
    {
        ptrs.push_back(const_cast<char *>(arg.c_str()));
    }
    ptrs[0] = const_cast<char *>(executable_name.c_str());
    int argc = static_cast<int>(argv.size());
    benchmark::Initialize(&argc, ptrs.data());
    std::vector<std::string> remaining_argv;
    remaining_argv.reserve(argc);
    for (int i = 0; i < argc; ++i)
    {
        remaining_argv.emplace_back(ptrs[i]);
    }
    return remaining_argv;
}

void RegisterBenchmark(const char *name, py::function f)
{
    benchmark::RegisterBenchmark(name, [f](benchmark::State &state) { f(&state); });
}

PYBIND11_MODULE(_benchmark, m)
{
    m.def("Initialize", Initialize);
    m.def("RegisterBenchmark", RegisterBenchmark);
    m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); });

    py::class_<benchmark::State>(m, "State")
        .def("__bool__", &benchmark::State::KeepRunning)
        .def_property_readonly("keep_running", &benchmark::State::KeepRunning);
};
} // namespace
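For context, the C++ API that this binding wraps can also be driven directly. A minimal sketch, assuming the Google Benchmark headers and library are available (the benchmark name and body are illustrative), registering a lambda at runtime the same way the wrapper above registers a py::function:

#include <benchmark/benchmark.h>
#include <string>

int main(int argc, char **argv)
{
    benchmark::Initialize(&argc, argv);
    // Runtime registration, mirroring the pybind11 RegisterBenchmark wrapper.
    benchmark::RegisterBenchmark("BM_ConcatRuntime", [](benchmark::State &state) {
        for (auto _ : state)
        {
            std::string s = std::string("hello") + "world";
            benchmark::DoNotOptimize(s); // keep the work from being optimized away
        }
    });
    benchmark::RunSpecifiedBenchmarks();
    return 0;
}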

View File

@@ -1,12 +1,13 @@
#include <gnuregex.h>
#include <string>

int main()
{
    std::string str = "test0159";
    regex_t re;
    int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
    if (ec != 0)
    {
        return ec;
    }
    return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}

View File

@@ -1,14 +1,15 @@
#include <regex.h>
#include <string>

int main()
{
    std::string str = "test0159";
    regex_t re;
    int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
    if (ec != 0)
    {
        return ec;
    }
    int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
    regfree(&re);
    return ret;
}

View File

@@ -1,10 +1,9 @@
#include <regex>
#include <string>

int main()
{
    const std::string str = "test0159";
    std::regex re;
    re = std::regex("^[a-z]+[0-9]+$", std::regex_constants::extended | std::regex_constants::nosubs);
    return std::regex_search(str, re) ? 0 : -1;
}

View File

@@ -1,6 +1,7 @@
#include <chrono>

int main()
{
    typedef std::chrono::steady_clock Clock;
    Clock::time_point tp = Clock::now();
    ((void)tp);
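The probe above only checks that std::chrono::steady_clock compiles. A minimal sketch of the timing pattern such a clock is used for (all names illustrative, not part of this repository):

#include <chrono>
#include <cstdio>

int main()
{
    typedef std::chrono::steady_clock Clock;
    Clock::time_point start = Clock::now();
    // ... timed work would go here ...
    Clock::time_point end = Clock::now();
    // steady_clock is monotonic, so end - start is safe for interval timing.
    double seconds = std::chrono::duration<double>(end - start).count();
    std::printf("elapsed: %f s\n", seconds);
    return 0;
}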

View File

@@ -1,4 +1,6 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"

int main()
{
}

View File

@@ -1,13 +1,15 @@
#include "benchmark/benchmark.h"

void BM_StringCreation(benchmark::State &state)
{
    while (state.KeepRunning())
        std::string empty_string;
}
BENCHMARK(BM_StringCreation);

void BM_StringCopy(benchmark::State &state)
{
    std::string x = "hello";
    while (state.KeepRunning())
        std::string copy(x);
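This sample uses the legacy while (state.KeepRunning()) loop. For comparison, a sketch of the equivalent range-for form plus BENCHMARK_MAIN, both part of the library's public API (the benchmark name here is illustrative):

#include <benchmark/benchmark.h>
#include <string>

// Equivalent benchmark using the range-based form of the iteration loop.
static void BM_StringCreationRanged(benchmark::State &state)
{
    for (auto _ : state)
    {
        std::string empty_string;
        benchmark::DoNotOptimize(empty_string);
    }
}
BENCHMARK(BM_StringCreationRanged);

BENCHMARK_MAIN(); // expands to Initialize + RunSpecifiedBenchmarks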

View File

@@ -3,8 +3,10 @@
#include "internal_macros.h"

namespace benchmark
{
namespace internal
{

// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
@@ -14,20 +16,18 @@ namespace internal
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];

// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N> char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif

#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))

} // end namespace internal
} // end namespace benchmark

#endif // BENCHMARK_ARRAYSIZE_H_
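A minimal usage sketch of the arraysize technique. Since arraysize.h is an internal header, this is a self-contained reimplementation with demo names, not the library's own macro:

#include <cstddef>

// Same trick as above: the helper is only declared, never defined, and its
// return type is a reference to an array of N chars, so sizeof() yields N.
template <typename T, std::size_t N> char (&ArraySizeHelperDemo(T (&array)[N]))[N];
#define arraysize_demo(array) (sizeof(ArraySizeHelperDemo(array)))

int main()
{
    int values[7] = {0};
    static_assert(arraysize_demo(values) == 7, "compile-time element count");
    // Unlike sizeof(a) / sizeof(a[0]), this fails to compile for pointers.
    return 0;
}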

View File

@@ -106,40 +106,34 @@ DEFINE_bool(benchmark_counters_tabular, false);
// The level of verbose logging to output
DEFINE_int32(v, 0);

namespace benchmark
{

namespace internal
{

// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile *)
{
}

} // namespace internal

State::State(IterationCount max_iters, const std::vector<int64_t> &ranges, int thread_i, int n_threads,
             internal::ThreadTimer *timer, internal::ThreadManager *manager)
    : total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), started_(false), finished_(false),
      error_occurred_(false), range_(ranges), complexity_n_(0), counters(), thread_index(thread_i),
      threads(n_threads), timer_(timer), manager_(manager)
{
    CHECK(max_iterations != 0) << "At least one iteration must be run";
    CHECK_LT(thread_index, threads) << "thread_index must be less than threads";

    // Note: The use of offsetof below is technically undefined until C++17
    // because State is not a standard layout type. However, all compilers
    // currently provide well-defined behavior as an extension (which is
    // demonstrated since constexpr evaluation must diagnose all undefined
    // behavior). However, GCC and Clang also warn about this use of offsetof,
    // which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
@@ -147,11 +141,9 @@ State::State(IterationCount max_iters, const std::vector<int64_t> &ranges,
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
    // Offset tests to ensure commonly accessed data is on the first cache line.
    const int cache_line_size = 64;
    static_assert(offsetof(State, error_occurred_) <= (cache_line_size - sizeof(error_occurred_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
@@ -159,128 +151,144 @@ State::State(IterationCount max_iters, const std::vector<int64_t> &ranges,
#endif
}

void State::PauseTiming()
{
    // Add in time accumulated so far
    CHECK(started_ && !finished_ && !error_occurred_);
    timer_->StopTimer();
}

void State::ResumeTiming()
{
    CHECK(started_ && !finished_ && !error_occurred_);
    timer_->StartTimer();
}

void State::SkipWithError(const char *msg)
{
    CHECK(msg);
    error_occurred_ = true;
    {
        MutexLock l(manager_->GetBenchmarkMutex());
        if (manager_->results.has_error_ == false)
        {
            manager_->results.error_message_ = msg;
            manager_->results.has_error_ = true;
        }
    }
    total_iterations_ = 0;
    if (timer_->running())
        timer_->StopTimer();
}

void State::SetIterationTime(double seconds)
{
    timer_->SetIterationTime(seconds);
}

void State::SetLabel(const char *label)
{
    MutexLock l(manager_->GetBenchmarkMutex());
    manager_->results.report_label_ = label;
}

void State::StartKeepRunning()
{
    CHECK(!started_ && !finished_);
    started_ = true;
    total_iterations_ = error_occurred_ ? 0 : max_iterations;
    manager_->StartStopBarrier();
    if (!error_occurred_)
        ResumeTiming();
}

void State::FinishKeepRunning()
{
    CHECK(started_ && (!finished_ || error_occurred_));
    if (!error_occurred_)
    {
        PauseTiming();
    }
    // Total iterations has now wrapped around past 0. Fix this.
    total_iterations_ = 0;
    finished_ = true;
    manager_->StartStopBarrier();
}

namespace internal
{
namespace
{

void RunBenchmarks(const std::vector<BenchmarkInstance> &benchmarks, BenchmarkReporter *display_reporter,
                   BenchmarkReporter *file_reporter)
{
    // Note the file_reporter can be null.
    CHECK(display_reporter != nullptr);

    // Determine the width of the name field using a minimum width of 10.
    bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
    size_t name_field_width = 10;
    size_t stat_field_width = 0;
    for (const BenchmarkInstance &benchmark : benchmarks)
    {
        name_field_width = std::max<size_t>(name_field_width, benchmark.name.str().size());
        might_have_aggregates |= benchmark.repetitions > 1;

        for (const auto &Stat : *benchmark.statistics)
            stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
    }
    if (might_have_aggregates)
        name_field_width += 1 + stat_field_width;

    // Print header here
    BenchmarkReporter::Context context;
    context.name_field_width = name_field_width;

    // Keep track of running times of all instances of current benchmark
    std::vector<BenchmarkReporter::Run> complexity_reports;

    // We flush streams after invoking reporter methods that write to them. This
    // ensures users get timely updates even when streams are not line-buffered.
    auto flushStreams = [](BenchmarkReporter *reporter) {
        if (!reporter)
            return;
        std::flush(reporter->GetOutputStream());
        std::flush(reporter->GetErrorStream());
    };

    if (display_reporter->ReportContext(context) && (!file_reporter || file_reporter->ReportContext(context)))
    {
        flushStreams(display_reporter);
        flushStreams(file_reporter);

        for (const auto &benchmark : benchmarks)
        {
            RunResults run_results = RunBenchmark(benchmark, &complexity_reports);

            auto report = [&run_results](BenchmarkReporter *reporter, bool report_aggregates_only) {
                assert(reporter);
                // If there are no aggregates, do output non-aggregates.
                report_aggregates_only &= !run_results.aggregates_only.empty();
                if (!report_aggregates_only)
                    reporter->ReportRuns(run_results.non_aggregates);
                if (!run_results.aggregates_only.empty())
                    reporter->ReportRuns(run_results.aggregates_only);
            };

            report(display_reporter, run_results.display_report_aggregates_only);
            if (file_reporter)
                report(file_reporter, run_results.file_report_aggregates_only);

            flushStreams(display_reporter);
            flushStreams(file_reporter);
        }
    }
    display_reporter->Finalize();
    if (file_reporter)
        file_reporter->Finalize();
    flushStreams(display_reporter);
    flushStreams(file_reporter);
}

// Disable deprecated warnings temporarily because we need to reference
@@ -290,210 +298,241 @@ void RunBenchmarks(const std::vector<BenchmarkInstance> &benchmarks,
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif

std::unique_ptr<BenchmarkReporter> CreateReporter(std::string const &name, ConsoleReporter::OutputOptions output_opts)
{
    typedef std::unique_ptr<BenchmarkReporter> PtrType;
    if (name == "console")
    {
        return PtrType(new ConsoleReporter(output_opts));
    }
    else if (name == "json")
    {
        return PtrType(new JSONReporter);
    }
    else if (name == "csv")
    {
        return PtrType(new CSVReporter);
    }
    else
    {
        std::cerr << "Unexpected format: '" << name << "'\n";
        std::exit(1);
    }
}

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif

} // end namespace

bool IsZero(double n)
{
    return std::abs(n) < std::numeric_limits<double>::epsilon();
}

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color)
{
    int output_opts = ConsoleReporter::OO_Defaults;
    auto is_benchmark_color = [force_no_color]() -> bool {
        if (force_no_color)
        {
            return false;
        }
        if (FLAGS_benchmark_color == "auto")
        {
            return IsColorTerminal();
        }
        return IsTruthyFlagValue(FLAGS_benchmark_color);
    };
    if (is_benchmark_color())
    {
        output_opts |= ConsoleReporter::OO_Color;
    }
    else
    {
        output_opts &= ~ConsoleReporter::OO_Color;
    }
    if (FLAGS_benchmark_counters_tabular)
    {
        output_opts |= ConsoleReporter::OO_Tabular;
    }
    else
    {
        output_opts &= ~ConsoleReporter::OO_Tabular;
    }
    return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

} // end namespace internal

size_t RunSpecifiedBenchmarks()
{
    return RunSpecifiedBenchmarks(nullptr, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter)
{
    return RunSpecifiedBenchmarks(display_reporter, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter, BenchmarkReporter *file_reporter)
{
    std::string spec = FLAGS_benchmark_filter;
    if (spec.empty() || spec == "all")
        spec = "."; // Regexp that matches all benchmarks

    // Setup the reporters
    std::ofstream output_file;
    std::unique_ptr<BenchmarkReporter> default_display_reporter;
    std::unique_ptr<BenchmarkReporter> default_file_reporter;
    if (!display_reporter)
    {
        default_display_reporter = internal::CreateReporter(FLAGS_benchmark_format, internal::GetOutputOptions());
        display_reporter = default_display_reporter.get();
    }
    auto &Out = display_reporter->GetOutputStream();
    auto &Err = display_reporter->GetErrorStream();

    std::string const &fname = FLAGS_benchmark_out;
    if (fname.empty() && file_reporter)
    {
        Err << "A custom file reporter was provided but "
               "--benchmark_out=<file> was not specified."
            << std::endl;
        std::exit(1);
    }
    if (!fname.empty())
    {
        output_file.open(fname);
        if (!output_file.is_open())
        {
            Err << "invalid file name: '" << fname << std::endl;
            std::exit(1);
        }
        if (!file_reporter)
        {
            default_file_reporter = internal::CreateReporter(FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
            file_reporter = default_file_reporter.get();
        }
        file_reporter->SetOutputStream(&output_file);
        file_reporter->SetErrorStream(&output_file);
    }

    std::vector<internal::BenchmarkInstance> benchmarks;
    if (!FindBenchmarksInternal(spec, &benchmarks, &Err))
        return 0;

    if (benchmarks.empty())
    {
        Err << "Failed to match any benchmarks against regex: " << spec << "\n";
        return 0;
    }

    if (FLAGS_benchmark_list_tests)
    {
        for (auto const &benchmark : benchmarks)
            Out << benchmark.name.str() << "\n";
    }
    else
    {
        internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
    }

    return benchmarks.size();
}

void RegisterMemoryManager(MemoryManager *manager)
{
    internal::memory_manager = manager;
}

namespace internal
{

void PrintUsageAndExit()
{
    fprintf(stdout, "benchmark"
                    " [--benchmark_list_tests={true|false}]\n"
                    " [--benchmark_filter=<regex>]\n"
                    " [--benchmark_min_time=<min_time>]\n"
                    " [--benchmark_repetitions=<num_repetitions>]\n"
                    " [--benchmark_report_aggregates_only={true|false}]\n"
                    " [--benchmark_display_aggregates_only={true|false}]\n"
                    " [--benchmark_format=<console|json|csv>]\n"
                    " [--benchmark_out=<filename>]\n"
                    " [--benchmark_out_format=<json|console|csv>]\n"
                    " [--benchmark_color={auto|true|false}]\n"
                    " [--benchmark_counters_tabular={true|false}]\n"
                    " [--v=<verbosity>]\n");
    exit(0);
}

void ParseCommandLineFlags(int *argc, char **argv)
{
    using namespace benchmark;
    BenchmarkReporter::Context::executable_name = (argc && *argc > 0) ? argv[0] : "unknown";
    for (int i = 1; argc && i < *argc; ++i)
    {
        if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) ||
            ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
            ParseDoubleFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) ||
            ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) ||
            ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", &FLAGS_benchmark_report_aggregates_only) ||
            ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", &FLAGS_benchmark_display_aggregates_only) ||
            ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
            ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
            ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) ||
            ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
            // "color_print" is the deprecated name for "benchmark_color".
            // TODO: Remove this.
            ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
            ParseBoolFlag(argv[i], "benchmark_counters_tabular", &FLAGS_benchmark_counters_tabular) ||
            ParseInt32Flag(argv[i], "v", &FLAGS_v))
        {
            for (int j = i; j != *argc - 1; ++j)
                argv[j] = argv[j + 1];

            --(*argc);
            --i;
        }
        else if (IsFlag(argv[i], "help"))
        {
            PrintUsageAndExit();
        }
    }
    for (auto const *flag : {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
        if (*flag != "console" && *flag != "json" && *flag != "csv")
        {
            PrintUsageAndExit();
        }
    if (FLAGS_benchmark_color.empty())
    {
        PrintUsageAndExit();
    }
}

int InitializeStreams()
{
    static std::ios_base::Init init;
    return 0;
}

} // end namespace internal

void Initialize(int *argc, char **argv)
{
    internal::ParseCommandLineFlags(argc, argv);
    internal::LogLevel() = FLAGS_v;
}

bool ReportUnrecognizedArguments(int argc, char **argv)
{
    for (int i = 1; i < argc; ++i)
    {
        fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
    }
    return argc > 1;
}

} // end namespace benchmark
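The entry points reformatted above compose into the usual main(). A sketch using only public calls that appear in this file (the benchmark itself is illustrative); this is essentially what BENCHMARK_MAIN expands to:

#include <benchmark/benchmark.h>

static void BM_Noop(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_Noop);

int main(int argc, char **argv)
{
    // Initialize() consumes the --benchmark_* flags via ParseCommandLineFlags();
    // anything left over is reported as an unrecognized flag.
    benchmark::Initialize(&argc, argv);
    if (benchmark::ReportUnrecognizedArguments(argc, argv))
        return 1;
    benchmark::RunSpecifiedBenchmarks();
    return 0;
}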

View File

@@ -1,15 +1,17 @@
#include "benchmark_api_internal.h"

namespace benchmark
{
namespace internal
{

State BenchmarkInstance::Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
                             internal::ThreadManager *manager) const
{
    State st(iters, arg, thread_id, threads, timer, manager);
    benchmark->Run(st);
    return st;
}

} // namespace internal
} // namespace benchmark

View File

@@ -11,43 +11,44 @@
#include <string>
#include <vector>

namespace benchmark
{
namespace internal
{

// Information kept per benchmark we may want to run
struct BenchmarkInstance
{
    BenchmarkName name;
    Benchmark *benchmark;
    AggregationReportMode aggregation_report_mode;
    std::vector<int64_t> arg;
    TimeUnit time_unit;
    int range_multiplier;
    bool measure_process_cpu_time;
    bool use_real_time;
    bool use_manual_time;
    BigO complexity;
    BigOFunc *complexity_lambda;
    UserCounters counters;
    const std::vector<Statistics> *statistics;
    bool last_benchmark_instance;
    int repetitions;
    double min_time;
    IterationCount iterations;
    int threads; // Number of concurrent threads to use

    State Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
              internal::ThreadManager *manager) const;
};

bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);

bool IsZero(double n);

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);

} // end namespace internal
} // end namespace benchmark

#endif // BENCHMARK_API_INTERNAL_H

View File

@@ -14,45 +14,53 @@
#include <benchmark/benchmark.h>

namespace benchmark
{

namespace
{

// Compute the total size of a pack of std::strings
size_t size_impl()
{
    return 0;
}

template <typename Head, typename... Tail> size_t size_impl(const Head &head, const Tail &...tail)
{
    return head.size() + size_impl(tail...);
}

// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string &, char)
{
}

template <typename Head, typename... Tail>
void join_impl(std::string &s, const char delimiter, const Head &head, const Tail &...tail)
{
    if (!s.empty() && !head.empty())
    {
        s += delimiter;
    }
    s += head;
    join_impl(s, delimiter, tail...);
}

template <typename... Ts> std::string join(char delimiter, const Ts &...ts)
{
    std::string s;
    s.reserve(sizeof...(Ts) + size_impl(ts...));
    join_impl(s, delimiter, ts...);
    return s;
}
} // namespace

std::string BenchmarkName::str() const
{
    return join('/', function_name, args, min_time, iterations, repetitions, time_type, threads);
}
} // namespace benchmark
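The join used by BenchmarkName::str() inserts the delimiter only between two non-empty components, so unset name parts vanish instead of leaving "//". A standalone sketch of the same pack-recursion technique (demo names, not the library's internals):

#include <iostream>
#include <string>

// Base case ends the recursion; each step appends one component.
static void join_demo(std::string &, char)
{
}
template <typename Head, typename... Tail>
static void join_demo(std::string &s, char delimiter, const Head &head, const Tail &...tail)
{
    if (!s.empty() && !head.empty())
        s += delimiter;
    s += head;
    join_demo(s, delimiter, tail...);
}

int main()
{
    std::string name;
    // The empty component is skipped rather than producing "BM_Copy/8//threads:2".
    join_demo(name, '/', std::string("BM_Copy"), std::string("8"), std::string(), std::string("threads:2"));
    std::cout << name << "\n"; // prints BM_Copy/8/threads:2
    return 0;
}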

View File

@ -52,17 +52,20 @@
#include "string_util.h" #include "string_util.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
{
namespace { namespace
{
// For non-dense Range, intermediate values are powers of kRangeMultiplier. // For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8; static const int kRangeMultiplier = 8;
// The size of a benchmark family determines is the number of inputs to repeat // The size of a benchmark family determines is the number of inputs to repeat
// the benchmark on. If this is "large" then warn the user during configuration. // the benchmark on. If this is "large" then warn the user during configuration.
static const size_t kMaxFamilySize = 100; static const size_t kMaxFamilySize = 100;
} // end namespace } // end namespace
namespace internal { namespace internal
{
//=============================================================================// //=============================================================================//
// BenchmarkFamilies // BenchmarkFamilies
@ -70,437 +73,492 @@ namespace internal {
// Class for managing registered benchmarks. Note that each registered // Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run. // benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies { class BenchmarkFamilies
public: {
static BenchmarkFamilies* GetInstance(); public:
static BenchmarkFamilies *GetInstance();
// Registers a benchmark family and returns the index assigned to it. // Registers a benchmark family and returns the index assigned to it.
size_t AddBenchmark(std::unique_ptr<Benchmark> family); size_t AddBenchmark(std::unique_ptr<Benchmark> family);
// Clear all registered benchmark families. // Clear all registered benchmark families.
void ClearBenchmarks(); void ClearBenchmarks();
// Extract the list of benchmark instances that match the specified // Extract the list of benchmark instances that match the specified
// regular expression. // regular expression.
bool FindBenchmarks(std::string re, bool FindBenchmarks(std::string re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
private: private:
BenchmarkFamilies() {} BenchmarkFamilies()
{
}
std::vector<std::unique_ptr<Benchmark>> families_; std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_; Mutex mutex_;
}; };
BenchmarkFamilies* BenchmarkFamilies::GetInstance() { BenchmarkFamilies *BenchmarkFamilies::GetInstance()
static BenchmarkFamilies instance; {
return &instance; static BenchmarkFamilies instance;
return &instance;
} }
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) { size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family)
MutexLock l(mutex_); {
size_t index = families_.size(); MutexLock l(mutex_);
families_.push_back(std::move(family)); size_t index = families_.size();
return index; families_.push_back(std::move(family));
return index;
} }
void BenchmarkFamilies::ClearBenchmarks() { void BenchmarkFamilies::ClearBenchmarks()
MutexLock l(mutex_); {
families_.clear(); MutexLock l(mutex_);
families_.shrink_to_fit(); families_.clear();
families_.shrink_to_fit();
} }
bool BenchmarkFamilies::FindBenchmarks( bool BenchmarkFamilies::FindBenchmarks(std::string spec, std::vector<BenchmarkInstance> *benchmarks,
std::string spec, std::vector<BenchmarkInstance>* benchmarks, std::ostream *ErrStream)
std::ostream* ErrStream) { {
CHECK(ErrStream); CHECK(ErrStream);
auto& Err = *ErrStream; auto &Err = *ErrStream;
// Make regular expression out of command-line flag // Make regular expression out of command-line flag
std::string error_msg; std::string error_msg;
Regex re; Regex re;
bool isNegativeFilter = false; bool isNegativeFilter = false;
if (spec[0] == '-') { if (spec[0] == '-')
spec.replace(0, 1, ""); {
isNegativeFilter = true; spec.replace(0, 1, "");
} isNegativeFilter = true;
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark>& family : families_) {
// Family was deleted or benchmark doesn't match
if (!family) continue;
if (family->ArgsCnt() == -1) {
family->Args({});
} }
const std::vector<int>* thread_counts = if (!re.Init(spec, &error_msg))
(family->thread_counts_.empty() {
? &one_thread Err << "Could not compile benchmark re: " << error_msg << std::endl;
: &static_cast<const std::vector<int>&>(family->thread_counts_)); return false;
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run at least 'family_size' different inputs.
// If 'family_size' is very large warn the user.
if (family_size > kMaxFamilySize) {
Err << "The number of inputs is very large. " << family->name_
<< " will be repeated at least " << family_size << " times.\n";
} }
// reserve in the special case the regex ".", since we know the final
// family size.
if (spec == ".") benchmarks->reserve(family_size);
for (auto const& args : family->args_) { // Special list of thread counts to use when none are specified
for (int num_threads : *thread_counts) { const std::vector<int> one_thread = {1};
BenchmarkInstance instance;
instance.name.function_name = family->name_;
instance.benchmark = family.get();
instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
instance.measure_process_cpu_time = family->measure_process_cpu_time_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.statistics = &family->statistics_;
instance.threads = num_threads;
// Add arguments to instance name MutexLock l(mutex_);
size_t arg_i = 0; for (std::unique_ptr<Benchmark> &family : families_)
for (auto const& arg : args) { {
if (!instance.name.args.empty()) { // Family was deleted or benchmark doesn't match
instance.name.args += '/'; if (!family)
} continue;
if (arg_i < family->arg_names_.size()) { if (family->ArgsCnt() == -1)
const auto& arg_name = family->arg_names_[arg_i]; {
if (!arg_name.empty()) { family->Args({});
instance.name.args += StrFormat("%s:", arg_name.c_str()); }
const std::vector<int> *thread_counts =
(family->thread_counts_.empty() ? &one_thread
: &static_cast<const std::vector<int> &>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run at least 'family_size' different inputs.
// If 'family_size' is very large warn the user.
if (family_size > kMaxFamilySize)
{
Err << "The number of inputs is very large. " << family->name_ << " will be repeated at least "
<< family_size << " times.\n";
}
// reserve in the special case the regex ".", since we know the final
// family size.
if (spec == ".")
benchmarks->reserve(family_size);
for (auto const &args : family->args_)
{
for (int num_threads : *thread_counts)
{
BenchmarkInstance instance;
instance.name.function_name = family->name_;
instance.benchmark = family.get();
instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
instance.measure_process_cpu_time = family->measure_process_cpu_time_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.statistics = &family->statistics_;
instance.threads = num_threads;
// Add arguments to instance name
size_t arg_i = 0;
for (auto const &arg : args)
{
if (!instance.name.args.empty())
{
instance.name.args += '/';
}
if (arg_i < family->arg_names_.size())
{
const auto &arg_name = family->arg_names_[arg_i];
if (!arg_name.empty())
{
instance.name.args += StrFormat("%s:", arg_name.c_str());
}
}
instance.name.args += StrFormat("%" PRId64, arg);
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name.min_time = StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0)
{
instance.name.iterations =
StrFormat("iterations:%lu", static_cast<unsigned long>(family->iterations_));
}
if (family->repetitions_ != 0)
instance.name.repetitions = StrFormat("repeats:%d", family->repetitions_);
if (family->measure_process_cpu_time_)
{
instance.name.time_type = "process_time";
}
if (family->use_manual_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "manual_time";
}
else if (family->use_real_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty())
{
instance.name.threads = StrFormat("threads:%d", instance.threads);
}
const auto full_name = instance.name.str();
if ((re.Match(full_name) && !isNegativeFilter) || (!re.Match(full_name) && isNegativeFilter))
{
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
} }
}
instance.name.args += StrFormat("%" PRId64, arg);
++arg_i;
} }
if (!IsZero(family->min_time_))
instance.name.min_time =
StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0) {
instance.name.iterations =
StrFormat("iterations:%lu",
static_cast<unsigned long>(family->iterations_));
}
if (family->repetitions_ != 0)
instance.name.repetitions =
StrFormat("repeats:%d", family->repetitions_);
if (family->measure_process_cpu_time_) {
instance.name.time_type = "process_time";
}
if (family->use_manual_time_) {
if (!instance.name.time_type.empty()) {
instance.name.time_type += '/';
}
instance.name.time_type += "manual_time";
} else if (family->use_real_time_) {
if (!instance.name.time_type.empty()) {
instance.name.time_type += '/';
}
instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty()) {
instance.name.threads = StrFormat("threads:%d", instance.threads);
}
const auto full_name = instance.name.str();
if ((re.Match(full_name) && !isNegativeFilter) ||
(!re.Match(full_name) && isNegativeFilter)) {
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
}
} }
} return true;
return true;
} }
Benchmark* RegisterBenchmarkInternal(Benchmark* bench) { Benchmark *RegisterBenchmarkInternal(Benchmark *bench)
std::unique_ptr<Benchmark> bench_ptr(bench); {
BenchmarkFamilies* families = BenchmarkFamilies::GetInstance(); std::unique_ptr<Benchmark> bench_ptr(bench);
families->AddBenchmark(std::move(bench_ptr)); BenchmarkFamilies *families = BenchmarkFamilies::GetInstance();
return bench; families->AddBenchmark(std::move(bench_ptr));
return bench;
} }
// FIXME: This function is a hack so that benchmark.cc can access // FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies` // `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re, bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err)
std::vector<BenchmarkInstance>* benchmarks, {
std::ostream* Err) { return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
} }
//=============================================================================// //=============================================================================//
// Benchmark // Benchmark
//=============================================================================// //=============================================================================//
Benchmark::Benchmark(const char* name) Benchmark::Benchmark(const char *name)
: name_(name), : name_(name), aggregation_report_mode_(ARM_Unspecified), time_unit_(kNanosecond),
aggregation_report_mode_(ARM_Unspecified), range_multiplier_(kRangeMultiplier), min_time_(0), iterations_(0), repetitions_(0),
time_unit_(kNanosecond), measure_process_cpu_time_(false), use_real_time_(false), use_manual_time_(false), complexity_(oNone),
range_multiplier_(kRangeMultiplier), complexity_lambda_(nullptr)
min_time_(0), {
iterations_(0), ComputeStatistics("mean", StatisticsMean);
repetitions_(0), ComputeStatistics("median", StatisticsMedian);
measure_process_cpu_time_(false), ComputeStatistics("stddev", StatisticsStdDev);
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
complexity_lambda_(nullptr) {
ComputeStatistics("mean", StatisticsMean);
ComputeStatistics("median", StatisticsMedian);
ComputeStatistics("stddev", StatisticsStdDev);
} }
Benchmark::~Benchmark() {} Benchmark::~Benchmark()
{
Benchmark* Benchmark::Arg(int64_t x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
} }
Benchmark* Benchmark::Unit(TimeUnit unit) { Benchmark *Benchmark::Arg(int64_t x)
time_unit_ = unit; {
return this; CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
} }
Benchmark* Benchmark::Range(int64_t start, int64_t limit) { Benchmark *Benchmark::Unit(TimeUnit unit)
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); {
std::vector<int64_t> arglist; time_unit_ = unit;
AddRange(&arglist, start, limit, range_multiplier_); return this;
for (int64_t i : arglist) {
args_.push_back({i});
}
return this;
} }
Benchmark* Benchmark::Ranges( Benchmark *Benchmark::Range(int64_t start, int64_t limit)
const std::vector<std::pair<int64_t, int64_t>>& ranges) { {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size())); CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<std::vector<int64_t>> arglists(ranges.size()); std::vector<int64_t> arglist;
std::size_t total = 1; AddRange(&arglist, start, limit, range_multiplier_);
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
range_multiplier_);
total *= arglists[i].size();
}
std::vector<std::size_t> ctr(arglists.size(), 0); for (int64_t i : arglist)
{
args_.push_back({i});
}
return this;
}
for (std::size_t i = 0; i < total; i++) { Benchmark *Benchmark::Ranges(const std::vector<std::pair<int64_t, int64_t>> &ranges)
std::vector<int64_t> tmp; {
tmp.reserve(arglists.size()); CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
for (std::size_t j = 0; j < arglists.size(); j++) { std::size_t total = 1;
tmp.push_back(arglists[j].at(ctr[j])); for (std::size_t i = 0; i < ranges.size(); i++)
{
AddRange(&arglists[i], ranges[i].first, ranges[i].second, range_multiplier_);
total *= arglists[i].size();
} }
args_.push_back(std::move(tmp)); std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t j = 0; j < arglists.size(); j++) { for (std::size_t i = 0; i < total; i++)
if (ctr[j] + 1 < arglists[j].size()) { {
++ctr[j]; std::vector<int64_t> tmp;
break; tmp.reserve(arglists.size());
}
ctr[j] = 0; for (std::size_t j = 0; j < arglists.size(); j++)
{
tmp.push_back(arglists[j].at(ctr[j]));
}
args_.push_back(std::move(tmp));
for (std::size_t j = 0; j < arglists.size(); j++)
{
if (ctr[j] + 1 < arglists[j].size())
{
++ctr[j];
break;
}
ctr[j] = 0;
}
} }
} return this;
return this;
} }
Benchmark* Benchmark::ArgName(const std::string& name) { Benchmark *Benchmark::ArgName(const std::string &name)
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); {
arg_names_ = {name}; CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
return this; arg_names_ = {name};
return this;
} }
Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) { Benchmark *Benchmark::ArgNames(const std::vector<std::string> &names)
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size())); {
arg_names_ = names; CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
return this; arg_names_ = names;
return this;
} }
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) { Benchmark *Benchmark::DenseRange(int64_t start, int64_t limit, int step)
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); {
CHECK_LE(start, limit); CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
for (int64_t arg = start; arg <= limit; arg += step) { CHECK_LE(start, limit);
args_.push_back({arg}); for (int64_t arg = start; arg <= limit; arg += step)
} {
return this; args_.push_back({arg});
}
return this;
} }
Benchmark* Benchmark::Args(const std::vector<int64_t>& args) { Benchmark *Benchmark::Args(const std::vector<int64_t> &args)
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size())); {
    CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
    args_.push_back(args);
    return this;
}

Benchmark *Benchmark::Apply(void (*custom_arguments)(Benchmark *benchmark))
{
    custom_arguments(this);
    return this;
}

Benchmark *Benchmark::RangeMultiplier(int multiplier)
{
    CHECK(multiplier > 1);
    range_multiplier_ = multiplier;
    return this;
}

Benchmark *Benchmark::MinTime(double t)
{
    CHECK(t > 0.0);
    CHECK(iterations_ == 0);
    min_time_ = t;
    return this;
}

Benchmark *Benchmark::Iterations(IterationCount n)
{
    CHECK(n > 0);
    CHECK(IsZero(min_time_));
    iterations_ = n;
    return this;
}

Benchmark *Benchmark::Repetitions(int n)
{
    CHECK(n > 0);
    repetitions_ = n;
    return this;
}

Benchmark *Benchmark::ReportAggregatesOnly(bool value)
{
    aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
    return this;
}

Benchmark *Benchmark::DisplayAggregatesOnly(bool value)
{
    // If we were called, the report mode is no longer 'unspecified', in any case.
    aggregation_report_mode_ = static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_Default);

    if (value)
    {
        aggregation_report_mode_ =
            static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
    }
    else
    {
        aggregation_report_mode_ =
            static_cast<AggregationReportMode>(aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
    }

    return this;
}

Benchmark *Benchmark::MeasureProcessCPUTime()
{
    // Can be used together with UseRealTime() / UseManualTime().
    measure_process_cpu_time_ = true;
    return this;
}

Benchmark *Benchmark::UseRealTime()
{
    CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
    use_real_time_ = true;
    return this;
}

Benchmark *Benchmark::UseManualTime()
{
    CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
    use_manual_time_ = true;
    return this;
}

Benchmark *Benchmark::Complexity(BigO complexity)
{
    complexity_ = complexity;
    return this;
}

Benchmark *Benchmark::Complexity(BigOFunc *complexity)
{
    complexity_lambda_ = complexity;
    complexity_ = oLambda;
    return this;
}

Benchmark *Benchmark::ComputeStatistics(std::string name, StatisticsFunc *statistics)
{
    statistics_.emplace_back(name, statistics);
    return this;
}

Benchmark *Benchmark::Threads(int t)
{
    CHECK_GT(t, 0);
    thread_counts_.push_back(t);
    return this;
}

Benchmark *Benchmark::ThreadRange(int min_threads, int max_threads)
{
    CHECK_GT(min_threads, 0);
    CHECK_GE(max_threads, min_threads);

    AddRange(&thread_counts_, min_threads, max_threads, 2);
    return this;
}

Benchmark *Benchmark::DenseThreadRange(int min_threads, int max_threads, int stride)
{
    CHECK_GT(min_threads, 0);
    CHECK_GE(max_threads, min_threads);
    CHECK_GE(stride, 1);

    for (auto i = min_threads; i < max_threads; i += stride)
    {
        thread_counts_.push_back(i);
    }
    thread_counts_.push_back(max_threads);
    return this;
}

Benchmark *Benchmark::ThreadPerCpu()
{
    thread_counts_.push_back(CPUInfo::Get().num_cpus);
    return this;
}

void Benchmark::SetName(const char *name)
{
    name_ = name;
}

int Benchmark::ArgsCnt() const
{
    if (args_.empty())
    {
        if (arg_names_.empty())
            return -1;
        return static_cast<int>(arg_names_.size());
    }
    return static_cast<int>(args_.front().size());
}

//=============================================================================//
// FunctionBenchmark
//=============================================================================//

void FunctionBenchmark::Run(State &st)
{
    func_(st);
}

} // end namespace internal

void ClearRegisteredBenchmarks()
{
    internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}

} // end namespace benchmark
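
Editor's note: the setters above each return this, so registration reads as a fluent chain. Below is a minimal usage sketch of that chain; BM_Sort and its parameters are hypothetical, chosen only to exercise RangeMultiplier(), Threads(), and Complexity() as defined above (Range() and BENCHMARK() are assumed from the library's public API).

#include <benchmark/benchmark.h>

#include <algorithm>
#include <random>
#include <vector>

// Hypothetical benchmark body, used only to illustrate the fluent setters.
static void BM_Sort(benchmark::State &state)
{
    std::vector<int> v(static_cast<size_t>(state.range(0)));
    std::mt19937 rng(42);
    for (auto _ : state)
    {
        state.PauseTiming();
        std::generate(v.begin(), v.end(), rng); // refill outside the timed region
        state.ResumeTiming();
        std::sort(v.begin(), v.end());
    }
    state.SetComplexityN(state.range(0));
}

// Each setter returns `this`, so the calls chain. Note that MinTime() and
// Iterations() are mutually exclusive, as the CHECKs above enforce.
BENCHMARK(BM_Sort)->RangeMultiplier(4)->Range(64, 1 << 14)->Threads(2)->Complexity(benchmark::oNLogN);

BENCHMARK_MAIN();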

View File

@ -5,103 +5,112 @@
#include "check.h" #include "check.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
// Append the powers of 'mult' in the closed interval [lo, hi]. // Append the powers of 'mult' in the closed interval [lo, hi].
// Returns iterator to the start of the inserted range. // Returns iterator to the start of the inserted range.
template <typename T> template <typename T> typename std::vector<T>::iterator AddPowers(std::vector<T> *dst, T lo, T hi, int mult)
typename std::vector<T>::iterator {
AddPowers(std::vector<T>* dst, T lo, T hi, int mult) { CHECK_GE(lo, 0);
CHECK_GE(lo, 0); CHECK_GE(hi, lo);
CHECK_GE(hi, lo); CHECK_GE(mult, 2);
CHECK_GE(mult, 2);
const size_t start_offset = dst->size(); const size_t start_offset = dst->size();
static const T kmax = std::numeric_limits<T>::max(); static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult" // Space out the values in multiples of "mult"
for (T i = 1; i <= hi; i *= mult) { for (T i = 1; i <= hi; i *= mult)
if (i >= lo) { {
dst->push_back(i); if (i >= lo)
{
dst->push_back(i);
}
// Break the loop here since multiplying by
// 'mult' would move outside of the range of T
if (i > kmax / mult)
break;
} }
// Break the loop here since multiplying by
// 'mult' would move outside of the range of T
if (i > kmax / mult) break;
}
return dst->begin() + start_offset; return dst->begin() + start_offset;
} }
template <typename T> template <typename T> void AddNegatedPowers(std::vector<T> *dst, T lo, T hi, int mult)
void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) { {
// We negate lo and hi so we require that they cannot be equal to 'min'. // We negate lo and hi so we require that they cannot be equal to 'min'.
CHECK_GT(lo, std::numeric_limits<T>::min()); CHECK_GT(lo, std::numeric_limits<T>::min());
CHECK_GT(hi, std::numeric_limits<T>::min()); CHECK_GT(hi, std::numeric_limits<T>::min());
CHECK_GE(hi, lo); CHECK_GE(hi, lo);
CHECK_LE(hi, 0); CHECK_LE(hi, 0);
// Add positive powers, then negate and reverse. // Add positive powers, then negate and reverse.
// Casts necessary since small integers get promoted // Casts necessary since small integers get promoted
// to 'int' when negating. // to 'int' when negating.
const auto lo_complement = static_cast<T>(-lo); const auto lo_complement = static_cast<T>(-lo);
const auto hi_complement = static_cast<T>(-hi); const auto hi_complement = static_cast<T>(-hi);
const auto it = AddPowers(dst, hi_complement, lo_complement, mult); const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
std::for_each(it, dst->end(), [](T& t) { t *= -1; }); std::for_each(it, dst->end(), [](T &t) { t *= -1; });
std::reverse(it, dst->end()); std::reverse(it, dst->end());
} }
template <typename T> template <typename T> void AddRange(std::vector<T> *dst, T lo, T hi, int mult)
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) { {
static_assert(std::is_integral<T>::value && std::is_signed<T>::value, static_assert(std::is_integral<T>::value && std::is_signed<T>::value, "Args type must be a signed integer");
"Args type must be a signed integer");
CHECK_GE(hi, lo); CHECK_GE(hi, lo);
CHECK_GE(mult, 2); CHECK_GE(mult, 2);
// Add "lo" // Add "lo"
dst->push_back(lo); dst->push_back(lo);
// Handle lo == hi as a special case, so we then know // Handle lo == hi as a special case, so we then know
// lo < hi and so it is safe to add 1 to lo and subtract 1 // lo < hi and so it is safe to add 1 to lo and subtract 1
// from hi without falling outside of the range of T. // from hi without falling outside of the range of T.
if (lo == hi) return; if (lo == hi)
return;
// Ensure that lo_inner <= hi_inner below. // Ensure that lo_inner <= hi_inner below.
if (lo + 1 == hi) { if (lo + 1 == hi)
dst->push_back(hi); {
return; dst->push_back(hi);
} return;
}
// Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive). // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
const auto lo_inner = static_cast<T>(lo + 1); const auto lo_inner = static_cast<T>(lo + 1);
const auto hi_inner = static_cast<T>(hi - 1); const auto hi_inner = static_cast<T>(hi - 1);
// Insert negative values // Insert negative values
if (lo_inner < 0) { if (lo_inner < 0)
AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult); {
} AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
}
// Treat 0 as a special case (see discussion on #762). // Treat 0 as a special case (see discussion on #762).
if (lo <= 0 && hi >= 0) { if (lo <= 0 && hi >= 0)
dst->push_back(0); {
} dst->push_back(0);
}
// Insert positive values // Insert positive values
if (hi_inner > 0) { if (hi_inner > 0)
AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult); {
} AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
}
// Add "hi" (if different from last value). // Add "hi" (if different from last value).
if (hi != dst->back()) { if (hi != dst->back())
dst->push_back(hi); {
} dst->push_back(hi);
}
} }
} // namespace internal } // namespace internal
} // namespace benchmark } // namespace benchmark
#endif // BENCHMARK_REGISTER_H #endif // BENCHMARK_REGISTER_H
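
Editor's note: to make AddRange()'s output concrete, the sketch below hand-derives what AddRange(&v, -8, 8, /*mult=*/2) pushes, following the branches above step by step. It only prints the derived sequence; it does not include the internal header.

#include <cstdio>
#include <vector>

// Derivation for AddRange(&v, -8, 8, /*mult=*/2), per the algorithm above:
//   1. push lo:                                    -8
//   2. AddNegatedPowers over [-7, -1]:             -4, -2, -1
//   3. zero, since lo <= 0 <= hi:                   0
//   4. AddPowers (powers of 2) over [1, 7]:         1, 2, 4
//   5. push hi, since it differs from back():       8
int main()
{
    const std::vector<int> expected = {-8, -4, -2, -1, 0, 1, 2, 4, 8};
    for (int v : expected)
        std::printf("%d ", v);
    std::printf("\n");
    return 0;
}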

View File

@ -51,312 +51,324 @@
#include "thread_manager.h" #include "thread_manager.h"
#include "thread_timer.h" #include "thread_timer.h"
namespace benchmark { namespace benchmark
{
namespace internal { namespace internal
{
MemoryManager* memory_manager = nullptr; MemoryManager *memory_manager = nullptr;
namespace { namespace
{
static constexpr IterationCount kMaxIterations = 1000000000; static constexpr IterationCount kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport( BenchmarkReporter::Run CreateRunReport(const benchmark::internal::BenchmarkInstance &b,
const benchmark::internal::BenchmarkInstance& b, const internal::ThreadManager::Result &results, IterationCount memory_iterations,
const internal::ThreadManager::Result& results, const MemoryManager::Result &memory_result, double seconds,
IterationCount memory_iterations, int64_t repetition_index)
const MemoryManager::Result& memory_result, double seconds, {
int64_t repetition_index) { // Create report about this benchmark run.
// Create report about this benchmark run. BenchmarkReporter::Run report;
BenchmarkReporter::Run report;
report.run_name = b.name; report.run_name = b.name;
report.error_occurred = results.has_error_; report.error_occurred = results.has_error_;
report.error_message = results.error_message_; report.error_message = results.error_message_;
report.report_label = results.report_label_; report.report_label = results.report_label_;
// This is the total iterations across all threads. // This is the total iterations across all threads.
report.iterations = results.iterations; report.iterations = results.iterations;
report.time_unit = b.time_unit; report.time_unit = b.time_unit;
report.threads = b.threads; report.threads = b.threads;
report.repetition_index = repetition_index; report.repetition_index = repetition_index;
report.repetitions = b.repetitions; report.repetitions = b.repetitions;
if (!report.error_occurred) { if (!report.error_occurred)
if (b.use_manual_time) { {
report.real_accumulated_time = results.manual_time_used; if (b.use_manual_time)
} else { {
report.real_accumulated_time = results.real_time_used; report.real_accumulated_time = results.manual_time_used;
}
else
{
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
report.statistics = b.statistics;
report.counters = results.counters;
if (memory_iterations > 0)
{
report.has_memory_result = true;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) / memory_iterations : 0;
report.max_bytes_used = memory_result.max_bytes_used;
}
internal::Finish(&report.counters, results.iterations, seconds, b.threads);
} }
report.cpu_accumulated_time = results.cpu_time_used; return report;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
report.statistics = b.statistics;
report.counters = results.counters;
if (memory_iterations > 0) {
report.has_memory_result = true;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) /
memory_iterations
: 0;
report.max_bytes_used = memory_result.max_bytes_used;
}
internal::Finish(&report.counters, results.iterations, seconds, b.threads);
}
return report;
} }
// Execute one thread of benchmark b for the specified number of iterations. // Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total. // Adds the stats collected for the thread into *total.
void RunInThread(const BenchmarkInstance* b, IterationCount iters, void RunInThread(const BenchmarkInstance *b, IterationCount iters, int thread_id, ThreadManager *manager)
int thread_id, ThreadManager* manager) { {
internal::ThreadTimer timer( internal::ThreadTimer timer(b->measure_process_cpu_time ? internal::ThreadTimer::CreateProcessCpuTime()
b->measure_process_cpu_time : internal::ThreadTimer::Create());
? internal::ThreadTimer::CreateProcessCpuTime() State st = b->Run(iters, thread_id, &timer, manager);
: internal::ThreadTimer::Create()); CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
State st = b->Run(iters, thread_id, &timer, manager); << "Benchmark returned before State::KeepRunning() returned false!";
CHECK(st.error_occurred() || st.iterations() >= st.max_iterations) {
<< "Benchmark returned before State::KeepRunning() returned false!"; MutexLock l(manager->GetBenchmarkMutex());
{ internal::ThreadManager::Result &results = manager->results;
MutexLock l(manager->GetBenchmarkMutex()); results.iterations += st.iterations();
internal::ThreadManager::Result& results = manager->results; results.cpu_time_used += timer.cpu_time_used();
results.iterations += st.iterations(); results.real_time_used += timer.real_time_used();
results.cpu_time_used += timer.cpu_time_used(); results.manual_time_used += timer.manual_time_used();
results.real_time_used += timer.real_time_used(); results.complexity_n += st.complexity_length_n();
results.manual_time_used += timer.manual_time_used(); internal::Increment(&results.counters, st.counters);
results.complexity_n += st.complexity_length_n(); }
internal::Increment(&results.counters, st.counters); manager->NotifyThreadComplete();
}
manager->NotifyThreadComplete();
} }
class BenchmarkRunner { class BenchmarkRunner
public: {
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_, public:
std::vector<BenchmarkReporter::Run>* complexity_reports_) BenchmarkRunner(const benchmark::internal::BenchmarkInstance &b_,
: b(b_), std::vector<BenchmarkReporter::Run> *complexity_reports_)
complexity_reports(*complexity_reports_), : b(b_), complexity_reports(*complexity_reports_),
min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time), min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
repeats(b.repetitions != 0 ? b.repetitions repeats(b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions),
: FLAGS_benchmark_repetitions), has_explicit_iteration_count(b.iterations != 0), pool(b.threads - 1),
has_explicit_iteration_count(b.iterations != 0), iters(has_explicit_iteration_count ? b.iterations : 1)
pool(b.threads - 1),
iters(has_explicit_iteration_count ? b.iterations : 1) {
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only ||
FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only =
FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode != internal::ARM_Unspecified) {
run_results.display_report_aggregates_only =
(b.aggregation_report_mode &
internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
}
for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
DoOneRepetition(repetition_num);
}
// Calculate additional statistics
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
// Maybe calculate complexity report
if ((b.complexity != oNone) && b.last_benchmark_instance) {
auto additional_run_stats = ComputeBigO(complexity_reports);
run_results.aggregates_only.insert(run_results.aggregates_only.end(),
additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
}
RunResults&& get_results() { return std::move(run_results); }
private:
RunResults run_results;
const benchmark::internal::BenchmarkInstance& b;
std::vector<BenchmarkReporter::Run>& complexity_reports;
const double min_time;
const int repeats;
const bool has_explicit_iteration_count;
std::vector<std::thread> pool;
IterationCount iters; // preserved between repetitions!
// So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count.
struct IterationResults {
internal::ThreadManager::Result results;
IterationCount iters;
double seconds;
};
IterationResults DoNIterations() {
VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(b.threads));
// Run all but one thread in separate threads
for (std::size_t ti = 0; ti < pool.size(); ++ti) {
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
manager.get());
}
// And run one thread here directly.
// (If we were asked to run just one thread, we don't create new threads.)
// Yes, we need to do this here *after* we start the separate threads.
RunInThread(&b, iters, 0, manager.get());
// The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads();
for (std::thread& thread : pool) thread.join();
IterationResults i;
// Acquire the measurements/counters from the manager, UNDER THE LOCK!
{ {
MutexLock l(manager->GetBenchmarkMutex()); run_results.display_report_aggregates_only =
i.results = manager->results; (FLAGS_benchmark_report_aggregates_only || FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only = FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode != internal::ARM_Unspecified)
{
run_results.display_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
}
for (int repetition_num = 0; repetition_num < repeats; repetition_num++)
{
DoOneRepetition(repetition_num);
}
// Calculate additional statistics
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
// Maybe calculate complexity report
if ((b.complexity != oNone) && b.last_benchmark_instance)
{
auto additional_run_stats = ComputeBigO(complexity_reports);
run_results.aggregates_only.insert(run_results.aggregates_only.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
} }
// And get rid of the manager. RunResults &&get_results()
manager.reset(); {
return std::move(run_results);
// Adjust real/manual time stats since they were reported per thread.
i.results.real_time_used /= b.threads;
i.results.manual_time_used /= b.threads;
// If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
<< i.results.real_time_used << "\n";
// So for how long were we running?
i.iters = iters;
// Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used;
if (b.use_manual_time) {
i.seconds = i.results.manual_time_used;
} else if (b.use_real_time) {
i.seconds = i.results.real_time_used;
} }
return i; private:
} RunResults run_results;
IterationCount PredictNumItersNeeded(const IterationResults& i) const { const benchmark::internal::BenchmarkInstance &b;
// See how much iterations should be increased by. std::vector<BenchmarkReporter::Run> &complexity_reports;
// Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
// If our last run was at least 10% of FLAGS_benchmark_min_time then we
// use the multiplier directly.
// Otherwise we use at most 10 times expansion.
// NOTE: When the last run was at least 10% of the min time the max
// expansion should be 14x.
bool is_significant = (i.seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0) multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up. const double min_time;
const IterationCount max_next_iters = static_cast<IterationCount>( const int repeats;
std::lround(std::max(multiplier * static_cast<double>(i.iters), const bool has_explicit_iteration_count;
static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits though..
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n"; std::vector<std::thread> pool;
return next_iters; // round up before conversion to integer.
}
bool ShouldReportIterationResults(const IterationResults& i) const { IterationCount iters; // preserved between repetitions!
// Determine if this run should be reported; // So only the first repetition has to find/calculate it,
// Either it has run for a sufficient amount of time // the other repetitions will just use that precomputed iteration count.
// or because an error was reported.
return i.results.has_error_ ||
i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
// Note that user provided timers are except from this sanity check.
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
}
void DoOneRepetition(int64_t repetition_index) { struct IterationResults
const bool is_the_first_repetition = repetition_index == 0; {
IterationResults i; internal::ThreadManager::Result results;
IterationCount iters;
double seconds;
};
IterationResults DoNIterations()
{
VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
// We *may* be gradually increasing the length (iteration count) std::unique_ptr<internal::ThreadManager> manager;
// of the benchmark until we decide the results are significant. manager.reset(new internal::ThreadManager(b.threads));
// And once we do, we report those last results and exit.
// Please do note that the if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and other repetitions
// simply use that precomputed iteration count.
for (;;) {
i = DoNIterations();
// Do we consider the results to be significant? // Run all but one thread in separate threads
// If we are doing repetitions, and the first repetition was already done, for (std::size_t ti = 0; ti < pool.size(); ++ti)
// it has calculated the correct iteration time, so we have run that very {
// iteration count just now. No need to calculate anything. Just report. pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1), manager.get());
// Else, the normal rules apply. }
const bool results_are_significant = !is_the_first_repetition || // And run one thread here directly.
has_explicit_iteration_count || // (If we were asked to run just one thread, we don't create new threads.)
ShouldReportIterationResults(i); // Yes, we need to do this here *after* we start the separate threads.
RunInThread(&b, iters, 0, manager.get());
if (results_are_significant) break; // Good, let's report them! // The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads();
for (std::thread &thread : pool)
thread.join();
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient IterationResults i;
// iteration count, and run the benchmark again... // Acquire the measurements/counters from the manager, UNDER THE LOCK!
{
MutexLock l(manager->GetBenchmarkMutex());
i.results = manager->results;
}
iters = PredictNumItersNeeded(i); // And get rid of the manager.
assert(iters > i.iters && manager.reset();
"if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run."); // Adjust real/manual time stats since they were reported per thread.
i.results.real_time_used /= b.threads;
i.results.manual_time_used /= b.threads;
// If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time)
i.results.cpu_time_used /= b.threads;
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" << i.results.real_time_used << "\n";
// So for how long were we running?
i.iters = iters;
// Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used;
if (b.use_manual_time)
{
i.seconds = i.results.manual_time_used;
}
else if (b.use_real_time)
{
i.seconds = i.results.real_time_used;
}
return i;
} }
// Oh, one last thing, we need to also produce the 'memory measurements'.. IterationCount PredictNumItersNeeded(const IterationResults &i) const
MemoryManager::Result memory_result; {
IterationCount memory_iterations = 0; // See how much iterations should be increased by.
if (memory_manager != nullptr) { // Note: Avoid division by zero with max(seconds, 1ns).
// Only run a few iterations to reduce the impact of one-time double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
// allocations in benchmarks that are not properly managed. // If our last run was at least 10% of FLAGS_benchmark_min_time then we
memory_iterations = std::min<IterationCount>(16, iters); // use the multiplier directly.
memory_manager->Start(); // Otherwise we use at most 10 times expansion.
std::unique_ptr<internal::ThreadManager> manager; // NOTE: When the last run was at least 10% of the min time the max
manager.reset(new internal::ThreadManager(1)); // expansion should be 14x.
RunInThread(&b, memory_iterations, 0, manager.get()); bool is_significant = (i.seconds / min_time) > 0.1;
manager->WaitForAllThreads(); multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
manager.reset(); if (multiplier <= 1.0)
multiplier = 2.0;
memory_manager->Stop(&memory_result); // So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>(
std::lround(std::max(multiplier * static_cast<double>(i.iters), static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits though..
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
return next_iters; // round up before conversion to integer.
} }
// Ok, now actualy report. bool ShouldReportIterationResults(const IterationResults &i) const
BenchmarkReporter::Run report = {
CreateRunReport(b, i.results, memory_iterations, memory_result, // Determine if this run should be reported;
i.seconds, repetition_index); // Either it has run for a sufficient amount of time
// or because an error was reported.
return i.results.has_error_ || i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
// Note that user provided timers are except from this sanity check.
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
}
if (!report.error_occurred && b.complexity != oNone) void DoOneRepetition(int64_t repetition_index)
complexity_reports.push_back(report); {
const bool is_the_first_repetition = repetition_index == 0;
IterationResults i;
run_results.non_aggregates.push_back(report); // We *may* be gradually increasing the length (iteration count)
} // of the benchmark until we decide the results are significant.
// And once we do, we report those last results and exit.
// Please do note that the if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and other repetitions
// simply use that precomputed iteration count.
for (;;)
{
i = DoNIterations();
// Do we consider the results to be significant?
// If we are doing repetitions, and the first repetition was already done,
// it has calculated the correct iteration time, so we have run that very
// iteration count just now. No need to calculate anything. Just report.
// Else, the normal rules apply.
const bool results_are_significant =
!is_the_first_repetition || has_explicit_iteration_count || ShouldReportIterationResults(i);
if (results_are_significant)
break; // Good, let's report them!
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient
// iteration count, and run the benchmark again...
iters = PredictNumItersNeeded(i);
assert(iters > i.iters && "if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run.");
}
// Oh, one last thing, we need to also produce the 'memory measurements'..
MemoryManager::Result memory_result;
IterationCount memory_iterations = 0;
if (memory_manager != nullptr)
{
// Only run a few iterations to reduce the impact of one-time
// allocations in benchmarks that are not properly managed.
memory_iterations = std::min<IterationCount>(16, iters);
memory_manager->Start();
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(1));
RunInThread(&b, memory_iterations, 0, manager.get());
manager->WaitForAllThreads();
manager.reset();
memory_manager->Stop(&memory_result);
}
// Ok, now actualy report.
BenchmarkReporter::Run report =
CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, repetition_index);
if (!report.error_occurred && b.complexity != oNone)
complexity_reports.push_back(report);
run_results.non_aggregates.push_back(report);
}
}; };
} // end namespace } // end namespace
RunResults RunBenchmark( RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
const benchmark::internal::BenchmarkInstance& b, std::vector<BenchmarkReporter::Run> *complexity_reports)
std::vector<BenchmarkReporter::Run>* complexity_reports) { {
internal::BenchmarkRunner r(b, complexity_reports); internal::BenchmarkRunner r(b, complexity_reports);
return r.get_results(); return r.get_results();
} }
} // end namespace internal } // end namespace internal
} // end namespace benchmark } // end namespace benchmark

View File

@ -26,26 +26,28 @@ DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only);

namespace benchmark
{

namespace internal
{

extern MemoryManager *memory_manager;

struct RunResults
{
    std::vector<BenchmarkReporter::Run> non_aggregates;
    std::vector<BenchmarkReporter::Run> aggregates_only;

    bool display_report_aggregates_only = false;
    bool file_report_aggregates_only = false;
};

RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
                        std::vector<BenchmarkReporter::Run> *complexity_reports);

} // namespace internal
} // end namespace benchmark

#endif // BENCHMARK_RUNNER_H_

View File

@ -8,56 +8,63 @@
#include "internal_macros.h" #include "internal_macros.h"
#include "log.h" #include "log.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
typedef void(AbortHandlerT)(); typedef void(AbortHandlerT)();
inline AbortHandlerT*& GetAbortHandler() { inline AbortHandlerT *&GetAbortHandler()
static AbortHandlerT* handler = &std::abort; {
return handler; static AbortHandlerT *handler = &std::abort;
return handler;
} }
BENCHMARK_NORETURN inline void CallAbortHandler() { BENCHMARK_NORETURN inline void CallAbortHandler()
GetAbortHandler()(); {
std::abort(); // fallback to enforce noreturn GetAbortHandler()();
std::abort(); // fallback to enforce noreturn
} }
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler // CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed. // will log information about the failures and abort when it is destructed.
class CheckHandler { class CheckHandler
public: {
CheckHandler(const char* check, const char* file, const char* func, int line) public:
: log_(GetErrorLogInstance()) { CheckHandler(const char *check, const char *file, const char *func, int line) : log_(GetErrorLogInstance())
log_ << file << ":" << line << ": " << func << ": Check `" << check {
<< "' failed. "; log_ << file << ":" << line << ": " << func << ": Check `" << check << "' failed. ";
} }
LogType& GetLog() { return log_; } LogType &GetLog()
{
return log_;
}
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false)
log_ << std::endl; {
CallAbortHandler(); log_ << std::endl;
} CallAbortHandler();
}
CheckHandler& operator=(const CheckHandler&) = delete; CheckHandler &operator=(const CheckHandler &) = delete;
CheckHandler(const CheckHandler&) = delete; CheckHandler(const CheckHandler &) = delete;
CheckHandler() = delete; CheckHandler() = delete;
private: private:
LogType& log_; LogType &log_;
}; };
} // end namespace internal } // end namespace internal
} // end namespace benchmark } // end namespace benchmark
// The CHECK macro returns a std::ostream object that can have extra information // The CHECK macro returns a std::ostream object that can have extra information
// written to it. // written to it.
#ifndef NDEBUG #ifndef NDEBUG
#define CHECK(b) \ #define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \ (b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__).GetLog())
.GetLog())
#else #else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance() #define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif #endif
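
Editor's note: a failing CHECK streams like an ostream. In debug builds the macro's ternary yields either the null log or a temporary CheckHandler whose destructor aborts after flushing what was streamed onto it. A hypothetical usage sketch, assuming this internal header is on the include path:

#include "check.h"

// If den == 0 in a debug build, CheckHandler logs file:line:function, the
// failed condition text, and the streamed message, then calls the abort
// handler from its destructor. In NDEBUG builds the whole expression is a
// no-op writing to the null log.
int Divide(int num, int den)
{
    CHECK(den != 0) << "denominator was " << den;
    return num / den;
}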

View File

@ -25,164 +25,174 @@
#include "internal_macros.h" #include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
#include <windows.h>
#include <io.h> #include <io.h>
#include <windows.h>
#else #else
#include <unistd.h> #include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS #endif // BENCHMARK_OS_WINDOWS
namespace benchmark { namespace benchmark
namespace { {
namespace
{
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode; typedef WORD PlatformColorCode;
#else #else
typedef const char* PlatformColorCode; typedef const char *PlatformColorCode;
#endif #endif
PlatformColorCode GetPlatformColorCode(LogColor color) { PlatformColorCode GetPlatformColorCode(LogColor color)
{
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
switch (color) { switch (color)
{
case COLOR_RED: case COLOR_RED:
return FOREGROUND_RED; return FOREGROUND_RED;
case COLOR_GREEN: case COLOR_GREEN:
return FOREGROUND_GREEN; return FOREGROUND_GREEN;
case COLOR_YELLOW: case COLOR_YELLOW:
return FOREGROUND_RED | FOREGROUND_GREEN; return FOREGROUND_RED | FOREGROUND_GREEN;
case COLOR_BLUE: case COLOR_BLUE:
return FOREGROUND_BLUE; return FOREGROUND_BLUE;
case COLOR_MAGENTA: case COLOR_MAGENTA:
return FOREGROUND_BLUE | FOREGROUND_RED; return FOREGROUND_BLUE | FOREGROUND_RED;
case COLOR_CYAN: case COLOR_CYAN:
return FOREGROUND_BLUE | FOREGROUND_GREEN; return FOREGROUND_BLUE | FOREGROUND_GREEN;
case COLOR_WHITE: // fall through to default case COLOR_WHITE: // fall through to default
default: default:
return 0; return 0;
}
#else
switch (color) {
case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_BLUE:
return "4";
case COLOR_MAGENTA:
return "5";
case COLOR_CYAN:
return "6";
case COLOR_WHITE:
return "7";
default:
return nullptr;
};
#endif
}
} // end namespace
std::string FormatString(const char* msg, va_list args) {
// we might need a second shot at this, so pre-emptivly make a copy
va_list args_cp;
va_copy(args_cp, args);
std::size_t size = 256;
char local_buff[256];
auto ret = vsnprintf(local_buff, size, msg, args_cp);
va_end(args_cp);
// currently there is no error handling for failure, so this is hack.
CHECK(ret >= 0);
if (ret == 0) // handle empty expansion
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else {
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
ret = vsnprintf(buff.get(), size, msg, args);
CHECK(ret > 0 && ((size_t)ret) < size);
return buff.get();
}
}
std::string FormatString(const char* msg, ...) {
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
va_end(args);
return tmp;
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args) {
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle,
GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char* color_code = GetPlatformColorCode(color);
if (color_code) out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal() {
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return 0 != _isatty(_fileno(stdout));
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char* const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color",
"screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
"linux", "cygwin",
};
const char* const term = getenv("TERM");
bool term_supports_color = false;
for (const char* candidate : SUPPORTED_TERM_VALUES) {
if (term && 0 == strcmp(term, candidate)) {
term_supports_color = true;
break;
} }
} #else
switch (color)
return 0 != isatty(fileno(stdout)) && term_supports_color; {
#endif // BENCHMARK_OS_WINDOWS case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_BLUE:
return "4";
case COLOR_MAGENTA:
return "5";
case COLOR_CYAN:
return "6";
case COLOR_WHITE:
return "7";
default:
return nullptr;
};
#endif
} }
} // end namespace benchmark } // end namespace
std::string FormatString(const char *msg, va_list args)
{
// we might need a second shot at this, so pre-emptivly make a copy
va_list args_cp;
va_copy(args_cp, args);
std::size_t size = 256;
char local_buff[256];
auto ret = vsnprintf(local_buff, size, msg, args_cp);
va_end(args_cp);
// currently there is no error handling for failure, so this is hack.
CHECK(ret >= 0);
if (ret == 0) // handle empty expansion
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else
{
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
ret = vsnprintf(buff.get(), size, msg, args);
CHECK(ret > 0 && ((size_t)ret) < size);
return buff.get();
}
}
std::string FormatString(const char *msg, ...)
{
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
va_end(args);
return tmp;
}
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args)
{
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle, GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char *color_code = GetPlatformColorCode(color);
if (color_code)
out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal()
{
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return 0 != _isatty(_fileno(stdout));
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char *const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color", "screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", "linux", "cygwin",
};
const char *const term = getenv("TERM");
bool term_supports_color = false;
for (const char *candidate : SUPPORTED_TERM_VALUES)
{
if (term && 0 == strcmp(term, candidate))
{
term_supports_color = true;
break;
}
}
return 0 != isatty(fileno(stdout)) && term_supports_color;
#endif // BENCHMARK_OS_WINDOWS
}
} // end namespace benchmark

View File

@ -5,29 +5,30 @@
#include <iostream>
#include <string>

namespace benchmark
{
enum LogColor
{
    COLOR_DEFAULT,
    COLOR_RED,
    COLOR_GREEN,
    COLOR_YELLOW,
    COLOR_BLUE,
    COLOR_MAGENTA,
    COLOR_CYAN,
    COLOR_WHITE
};

std::string FormatString(const char *msg, va_list args);
std::string FormatString(const char *msg, ...);

void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...);

// Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise.
bool IsColorTerminal();

} // end namespace benchmark

#endif // BENCHMARK_COLORPRINT_H_

View File

@ -21,112 +21,121 @@
#include <iostream>
#include <limits>

namespace benchmark
{
namespace
{

// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string &src_text, const char *str, int32_t *value)
{
    // Parses the string as a decimal integer.
    char *end = nullptr;
    const long long_value = strtol(str, &end, 10); // NOLINT

    // Has strtol() consumed all characters in the string?
    if (*end != '\0')
    {
        // No - an invalid character was encountered.
        std::cerr << src_text << " is expected to be a 32-bit integer, "
                  << "but actually has value \"" << str << "\".\n";
        return false;
    }

    // Is the parsed value in the range of an Int32?
    const int32_t result = static_cast<int32_t>(long_value);
    if (long_value == std::numeric_limits<long>::max() || long_value == std::numeric_limits<long>::min() ||
        // The parsed value overflows as a long. (strtol() returns
        // LONG_MAX or LONG_MIN when the input overflows.)
        result != long_value
        // The parsed value overflows as an Int32.
    )
    {
        std::cerr << src_text << " is expected to be a 32-bit integer, "
                  << "but actually has value \"" << str << "\", "
                  << "which overflows.\n";
        return false;
    }

    *value = result;
    return true;
}

// Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string &src_text, const char *str, double *value)
{
    // Parses the string as a decimal floating-point number.
    char *end = nullptr;
    const double double_value = strtod(str, &end); // NOLINT

    // Has strtod() consumed all characters in the string?
    if (*end != '\0')
    {
        // No - an invalid character was encountered.
        std::cerr << src_text << " is expected to be a double, "
                  << "but actually has value \"" << str << "\".\n";
        return false;
    }

    *value = double_value;
    return true;
}

// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char *flag)
{
    const std::string flag_str(flag);

    std::string env_var;
    for (size_t i = 0; i != flag_str.length(); ++i)
        env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));

    return "BENCHMARK_" + env_var;
}

} // namespace

bool BoolFromEnv(const char *flag, bool default_val)
{
    const std::string env_var = FlagToEnvVar(flag);
    const char *const value_str = getenv(env_var.c_str());
    return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
}

int32_t Int32FromEnv(const char *flag, int32_t default_val)
{
    const std::string env_var = FlagToEnvVar(flag);
    const char *const value_str = getenv(env_var.c_str());
    int32_t value = default_val;
    if (value_str == nullptr || !ParseInt32(std::string("Environment variable ") + env_var, value_str, &value))
    {
        return default_val;
    }
    return value;
}

double DoubleFromEnv(const char *flag, double default_val)
{
    const std::string env_var = FlagToEnvVar(flag);
    const char *const value_str = getenv(env_var.c_str());
    double value = default_val;
    if (value_str == nullptr || !ParseDouble(std::string("Environment variable ") + env_var, value_str, &value))
    {
        return default_val;
    }
    return value;
}

const char *StringFromEnv(const char *flag, const char *default_val)
{
    const std::string env_var = FlagToEnvVar(flag);
    const char *const value = getenv(env_var.c_str());
    return value == nullptr ? default_val : value;
}

// Parses a string as a command line flag. The string should have
@ -134,95 +143,109 @@ const char* StringFromEnv(const char* flag, const char* default_val) {
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char *ParseFlagValue(const char *str, const char *flag, bool def_optional)
{
    // str and flag must not be nullptr.
    if (str == nullptr || flag == nullptr)
        return nullptr;

    // The flag must start with "--".
    const std::string flag_str = std::string("--") + std::string(flag);
    const size_t flag_len = flag_str.length();
    if (strncmp(str, flag_str.c_str(), flag_len) != 0)
        return nullptr;

    // Skips the flag name.
    const char *flag_end = str + flag_len;

    // When def_optional is true, it's OK to not have a "=value" part.
    if (def_optional && (flag_end[0] == '\0'))
        return flag_end;

    // If def_optional is true and there are more characters after the
    // flag name, or if def_optional is false, there must be a '=' after
    // the flag name.
    if (flag_end[0] != '=')
        return nullptr;

    // Returns the string after "=".
    return flag_end + 1;
}

bool ParseBoolFlag(const char *str, const char *flag, bool *value)
{
    // Gets the value of the flag as a string.
    const char *const value_str = ParseFlagValue(str, flag, true);

    // Aborts if the parsing failed.
    if (value_str == nullptr)
        return false;

    // Converts the string value to a bool.
    *value = IsTruthyFlagValue(value_str);
    return true;
}

bool ParseInt32Flag(const char *str, const char *flag, int32_t *value)
{
    // Gets the value of the flag as a string.
    const char *const value_str = ParseFlagValue(str, flag, false);

    // Aborts if the parsing failed.
    if (value_str == nullptr)
        return false;

    // Sets *value to the value of the flag.
    return ParseInt32(std::string("The value of flag --") + flag, value_str, value);
}

bool ParseDoubleFlag(const char *str, const char *flag, double *value)
{
    // Gets the value of the flag as a string.
    const char *const value_str = ParseFlagValue(str, flag, false);

    // Aborts if the parsing failed.
    if (value_str == nullptr)
        return false;

    // Sets *value to the value of the flag.
    return ParseDouble(std::string("The value of flag --") + flag, value_str, value);
}

bool ParseStringFlag(const char *str, const char *flag, std::string *value)
{
    // Gets the value of the flag as a string.
    const char *const value_str = ParseFlagValue(str, flag, false);

    // Aborts if the parsing failed.
    if (value_str == nullptr)
        return false;

    *value = value_str;
    return true;
}

bool IsFlag(const char *str, const char *flag)
{
    return (ParseFlagValue(str, flag, true) != nullptr);
}

bool IsTruthyFlagValue(const std::string &value)
{
    if (value.size() == 1)
    {
        char v = value[0];
        return isalnum(v) && !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
    }
    else if (!value.empty())
    {
        std::string value_lower(value);
        std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
                       [](char c) { return static_cast<char>(::tolower(c)); });
        return !(value_lower == "false" || value_lower == "no" || value_lower == "off");
    }
    else
        return true;
}

} // end namespace benchmark

View File

@ -14,48 +14,41 @@
#define DECLARE_string(name) extern std::string FLAG(name)
// Macros for defining flags.
#define DEFINE_bool(name, default_val) bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
namespace benchmark
{
// Parses a bool from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns IsTruthyFlagValue() value; if not,
// returns the given default value.
bool BoolFromEnv(const char *flag, bool default_val);
// Parses an Int32 from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns ParseInt32() value; if not, returns
// the given default value.
int32_t Int32FromEnv(const char *flag, int32_t default_val);
// Parses a Double from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns ParseDouble(); if not, returns
// the given default value.
double DoubleFromEnv(const char *flag, double default_val);
// Parses a string from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns its value; if not, returns
// the given default value.
const char *StringFromEnv(const char *flag, const char *default_val);
// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
@ -66,38 +59,38 @@ const char* StringFromEnv(const char* flag, const char* default_val);
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char *str, const char *flag, bool *value);
// Parses a string for an Int32 flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char *str, const char *flag, int32_t *value);
// Parses a string for a Double flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char *str, const char *flag, double *value);
// Parses a string for a string flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseStringFlag(const char *str, const char *flag, std::string *value);
// Returns true if the string matches the flag.
bool IsFlag(const char *str, const char *flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. Also returns false if the value matches
// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
// returns true if value is the empty string.
bool IsTruthyFlagValue(const std::string &value);
} // end namespace benchmark
#endif // BENCHMARK_COMMANDLINEFLAGS_H_
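As a rough sketch of the flow these macros set up (the env-var lookup here is simplified and hypothetical; the real BoolFromEnv's naming and truthiness rules live in the functions declared above), a flag gets an environment-derived default that a later argv scan can override:

#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>

// Simplified, hypothetical stand-in for BoolFromEnv; the real lookup rules
// (e.g. env-variable capitalization) may differ.
static bool BoolFromEnvSketch(const char *name, bool default_val)
{
    const char *v = std::getenv(name);
    return v == nullptr ? default_val : std::string(v) != "0";
}

static bool FLAG_color = BoolFromEnvSketch("color", false);

int main(int argc, char **argv)
{
    for (int i = 1; i < argc; ++i)
    {
        // "--color=value" overrides the environment-derived default.
        if (std::strncmp(argv[i], "--color=", 8) == 0)
            FLAG_color = std::string(argv[i] + 8) != "0";
    }
    std::cout << "color = " << (FLAG_color ? "on" : "off") << '\n';
}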

View File

@ -17,56 +17,58 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include "check.h" #include "check.h"
#include "complexity.h" #include "complexity.h"
#include <algorithm>
#include <cmath>
namespace benchmark { namespace benchmark
{
// Internal function to calculate the different scalability forms // Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) { BigOFunc *FittingCurve(BigO complexity)
static const double kLog2E = 1.44269504088896340736; {
switch (complexity) { static const double kLog2E = 1.44269504088896340736;
switch (complexity)
{
case oN: case oN:
return [](IterationCount n) -> double { return static_cast<double>(n); }; return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared: case oNSquared:
return [](IterationCount n) -> double { return std::pow(n, 2); }; return [](IterationCount n) -> double { return std::pow(n, 2); };
case oNCubed: case oNCubed:
return [](IterationCount n) -> double { return std::pow(n, 3); }; return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN: case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */ /* Note: can't use log2 because Android's GNU STL lacks it */
return return [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
[](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN: case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */ /* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) { return [](IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); };
return kLog2E * n * log(static_cast<double>(n));
};
case o1: case o1:
default: default:
return [](IterationCount) { return 1.0; }; return [](IterationCount) { return 1.0; };
} }
} }
// Function to return an string for the calculated complexity // Function to return an string for the calculated complexity
std::string GetBigOString(BigO complexity) { std::string GetBigOString(BigO complexity)
switch (complexity) { {
switch (complexity)
{
case oN: case oN:
return "N"; return "N";
case oNSquared: case oNSquared:
return "N^2"; return "N^2";
case oNCubed: case oNCubed:
return "N^3"; return "N^3";
case oLogN: case oLogN:
return "lgN"; return "lgN";
case oNLogN: case oNLogN:
return "NlgN"; return "NlgN";
case o1: case o1:
return "(1)"; return "(1)";
default: default:
return "f(N)"; return "f(N)";
} }
} }
// Find the coefficient for the high-order term in the running time, by // Find the coefficient for the high-order term in the running time, by
@ -79,41 +81,42 @@ std::string GetBigOString(BigO complexity) {
// For a deeper explanation on the algorithm logic, please refer to // For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics // https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n, LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, BigOFunc *fitting_curve)
const std::vector<double>& time, {
BigOFunc* fitting_curve) { double sigma_gn = 0.0;
double sigma_gn = 0.0; double sigma_gn_squared = 0.0;
double sigma_gn_squared = 0.0; double sigma_time = 0.0;
double sigma_time = 0.0; double sigma_time_gn = 0.0;
double sigma_time_gn = 0.0;
// Calculate least square fitting parameter // Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) { for (size_t i = 0; i < n.size(); ++i)
double gn_i = fitting_curve(n[i]); {
sigma_gn += gn_i; double gn_i = fitting_curve(n[i]);
sigma_gn_squared += gn_i * gn_i; sigma_gn += gn_i;
sigma_time += time[i]; sigma_gn_squared += gn_i * gn_i;
sigma_time_gn += time[i] * gn_i; sigma_time += time[i];
} sigma_time_gn += time[i] * gn_i;
}
LeastSq result; LeastSq result;
result.complexity = oLambda; result.complexity = oLambda;
// Calculate complexity. // Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared; result.coef = sigma_time_gn / sigma_gn_squared;
// Calculate RMS // Calculate RMS
double rms = 0.0; double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) { for (size_t i = 0; i < n.size(); ++i)
double fit = result.coef * fitting_curve(n[i]); {
rms += pow((time[i] - fit), 2); double fit = result.coef * fitting_curve(n[i]);
} rms += pow((time[i] - fit), 2);
}
// Normalized RMS by the mean of the observed values // Normalized RMS by the mean of the observed values
double mean = sigma_time / n.size(); double mean = sigma_time / n.size();
result.rms = sqrt(rms / n.size()) / mean; result.rms = sqrt(rms / n.size()) / mean;
return result; return result;
} }
// Find the coefficient for the high-order term in the running time, by // Find the coefficient for the high-order term in the running time, by
@ -123,116 +126,126 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// - complexity : If different than oAuto, the fitting curve will stick to // - complexity : If different than oAuto, the fitting curve will stick to
// this one. If it is oAuto, it will be calculated the best // this one. If it is oAuto, it will be calculated the best
// fitting curve. // fitting curve.
LeastSq MinimalLeastSq(const std::vector<int64_t>& n, LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, const BigO complexity)
const std::vector<double>& time, const BigO complexity) { {
CHECK_EQ(n.size(), time.size()); CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two
// benchmark runs are given // benchmark runs are given
CHECK_NE(complexity, oNone); CHECK_NE(complexity, oNone);
LeastSq best_fit; LeastSq best_fit;
if (complexity == oAuto) { if (complexity == oAuto)
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed}; {
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve // Take o1 as default best fitting curve
best_fit = MinimalLeastSq(n, time, FittingCurve(o1)); best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1; best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one // Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) { for (const auto &fit : fit_curves)
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit)); {
if (current_fit.rms < best_fit.rms) { LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
best_fit = current_fit; if (current_fit.rms < best_fit.rms)
best_fit.complexity = fit; {
} best_fit = current_fit;
best_fit.complexity = fit;
}
}
}
else
{
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
} }
} else {
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
return best_fit; return best_fit;
} }
std::vector<BenchmarkReporter::Run> ComputeBigO( std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports)
const std::vector<BenchmarkReporter::Run>& reports) { {
typedef BenchmarkReporter::Run Run; typedef BenchmarkReporter::Run Run;
std::vector<Run> results; std::vector<Run> results;
if (reports.size() < 2) return results; if (reports.size() < 2)
return results;
// Accumulators. // Accumulators.
std::vector<int64_t> n; std::vector<int64_t> n;
std::vector<double> real_time; std::vector<double> real_time;
std::vector<double> cpu_time; std::vector<double> cpu_time;
// Populate the accumulators. // Populate the accumulators.
for (const Run& run : reports) { for (const Run &run : reports)
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?"; {
n.push_back(run.complexity_n); CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
real_time.push_back(run.real_accumulated_time / run.iterations); n.push_back(run.complexity_n);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations); real_time.push_back(run.real_accumulated_time / run.iterations);
} cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
LeastSq result_cpu; LeastSq result_cpu;
LeastSq result_real; LeastSq result_real;
if (reports[0].complexity == oLambda) { if (reports[0].complexity == oLambda)
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda); {
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda); result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
} else { result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); }
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); else
} {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
// Drop the 'args' when reporting complexity. // Drop the 'args' when reporting complexity.
auto run_name = reports[0].run_name; auto run_name = reports[0].run_name;
run_name.args.clear(); run_name.args.clear();
// Get the data from the accumulator to BenchmarkReporter::Run's. // Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o; Run big_o;
big_o.run_name = run_name; big_o.run_name = run_name;
big_o.run_type = BenchmarkReporter::Run::RT_Aggregate; big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
big_o.repetitions = reports[0].repetitions; big_o.repetitions = reports[0].repetitions;
big_o.repetition_index = Run::no_repetition_index; big_o.repetition_index = Run::no_repetition_index;
big_o.threads = reports[0].threads; big_o.threads = reports[0].threads;
big_o.aggregate_name = "BigO"; big_o.aggregate_name = "BigO";
big_o.report_label = reports[0].report_label; big_o.report_label = reports[0].report_label;
big_o.iterations = 0; big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef; big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef; big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true; big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity; big_o.complexity = result_cpu.complexity;
// All the time results are reported after being multiplied by the // All the time results are reported after being multiplied by the
// time unit multiplier. But since RMS is a relative quantity it // time unit multiplier. But since RMS is a relative quantity it
// should not be multiplied at all. So, here, we _divide_ it by the // should not be multiplied at all. So, here, we _divide_ it by the
// multiplier so that when it is multiplied later the result is the // multiplier so that when it is multiplied later the result is the
// correct one. // correct one.
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit); double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// Only add label to mean/stddev if it is same for all runs // Only add label to mean/stddev if it is same for all runs
Run rms; Run rms;
rms.run_name = run_name; rms.run_name = run_name;
rms.run_type = BenchmarkReporter::Run::RT_Aggregate; rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
rms.aggregate_name = "RMS"; rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label; rms.report_label = big_o.report_label;
rms.iterations = 0; rms.iterations = 0;
rms.repetition_index = Run::no_repetition_index; rms.repetition_index = Run::no_repetition_index;
rms.repetitions = reports[0].repetitions; rms.repetitions = reports[0].repetitions;
rms.threads = reports[0].threads; rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier; rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier; rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true; rms.report_rms = true;
rms.complexity = result_cpu.complexity; rms.complexity = result_cpu.complexity;
// don't forget to keep the time unit, or we won't be able to // don't forget to keep the time unit, or we won't be able to
// recover the correct value. // recover the correct value.
rms.time_unit = reports[0].time_unit; rms.time_unit = reports[0].time_unit;
results.push_back(big_o); results.push_back(big_o);
results.push_back(rms); results.push_back(rms);
return results; return results;
} }
} // end namespace benchmark } // end namespace benchmark
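To make the math concrete: for a candidate curve g(n), the closed-form fit used above is coef = sigma_time_gn / sigma_gn_squared, and the reported RMS is the root-mean-square residual normalized by the mean time. A standalone worked example with made-up data:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical per-iteration times for n = 1000, 2000, 4000 and g(n) = n.
    std::vector<double> n = {1000, 2000, 4000};
    std::vector<double> t = {10.1, 19.8, 40.3};
    double sgn2 = 0, st = 0, stgn = 0;
    for (size_t i = 0; i < n.size(); ++i)
    {
        sgn2 += n[i] * n[i];
        st += t[i];
        stgn += t[i] * n[i];
    }
    double coef = stgn / sgn2; // ~0.01, i.e. time ~ 0.01 * N
    double rms = 0;
    for (size_t i = 0; i < n.size(); ++i)
        rms += std::pow(t[i] - coef * n[i], 2);
    rms = std::sqrt(rms / n.size()) / (st / n.size());
    std::printf("coef = %g, rms = %g\n", coef, rms);
}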

View File

@ -23,12 +23,12 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
{
// Return a vector containing the bigO and RMS information for the specified // Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned. // list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO( std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports);
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq // This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coeficient for the high-order term as // - coef : Estimated coeficient for the high-order term as
@ -39,17 +39,20 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// the same value. In case BigO::oAuto has been selected, this // the same value. In case BigO::oAuto has been selected, this
// parameter will return the best fitting curve detected. // parameter will return the best fitting curve detected.
struct LeastSq { struct LeastSq
LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {} {
LeastSq() : coef(0.0), rms(0.0), complexity(oNone)
{
}
double coef; double coef;
double rms; double rms;
BigO complexity; BigO complexity;
}; };
// Function to return an string for the calculated complexity // Function to return an string for the calculated complexity
std::string GetBigOString(BigO complexity); std::string GetBigOString(BigO complexity);
} // end namespace benchmark } // end namespace benchmark
#endif // COMPLEXITY_H_ #endif // COMPLEXITY_H_
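For context, these internals sit behind the public Complexity() API; a typical registration looks roughly like the sketch below (assumes the Google Benchmark headers and library are available; BM_Sort is a made-up benchmark, and the fill is included in the timed loop for brevity):

#include "benchmark/benchmark.h"
#include <algorithm>
#include <cstdlib>
#include <vector>

static void BM_Sort(benchmark::State &state)
{
    std::vector<int> v(state.range(0));
    for (auto _ : state)
    {
        for (int &x : v)
            x = rand();
        std::sort(v.begin(), v.end());
    }
    // Feeds the 'complexity_n' that ComputeBigO reads above.
    state.SetComplexityN(state.range(0));
}
// oAuto asks MinimalLeastSq to pick the best-fitting curve.
BENCHMARK(BM_Sort)->Range(1 << 10, 1 << 18)->Complexity(benchmark::oAuto);
BENCHMARK_MAIN();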

View File

@ -31,147 +31,164 @@
#include "string_util.h" #include "string_util.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
{
bool ConsoleReporter::ReportContext(const Context& context) { bool ConsoleReporter::ReportContext(const Context &context)
name_field_width_ = context.name_field_width; {
printed_header_ = false; name_field_width_ = context.name_field_width;
prev_counters_.clear(); printed_header_ = false;
prev_counters_.clear();
PrintBasicContext(&GetErrorStream(), context); PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream())
GetErrorStream() {
<< "Color printing is only supported for stdout on windows." GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n"; " Disabling color printing\n";
output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
} }
#endif #endif
return true; return true;
} }
void ConsoleReporter::PrintHeader(const Run& run) { void ConsoleReporter::PrintHeader(const Run &run)
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), {
"Benchmark", "Time", "CPU", "Iterations"); std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), "Benchmark", "Time",
if(!run.counters.empty()) { "CPU", "Iterations");
if(output_options_ & OO_Tabular) { if (!run.counters.empty())
for(auto const& c : run.counters) { {
str += FormatString(" %10s", c.first.c_str()); if (output_options_ & OO_Tabular)
} {
} else { for (auto const &c : run.counters)
str += " UserCounters..."; {
str += FormatString(" %10s", c.first.c_str());
}
}
else
{
str += " UserCounters...";
}
} }
} std::string line = std::string(str.length(), '-');
std::string line = std::string(str.length(), '-'); GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
} }
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) { void ConsoleReporter::ReportRuns(const std::vector<Run> &reports)
for (const auto& run : reports) { {
// print the header: for (const auto &run : reports)
// --- if none was printed yet {
bool print_header = !printed_header_; // print the header:
// --- or if the format is tabular and this run // --- if none was printed yet
// has different fields from the prev header bool print_header = !printed_header_;
print_header |= (output_options_ & OO_Tabular) && // --- or if the format is tabular and this run
(!internal::SameNames(run.counters, prev_counters_)); // has different fields from the prev header
if (print_header) { print_header |= (output_options_ & OO_Tabular) && (!internal::SameNames(run.counters, prev_counters_));
printed_header_ = true; if (print_header)
prev_counters_ = run.counters; {
PrintHeader(run); printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
}
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
} }
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
}
} }
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, static void IgnoreColorPrint(std::ostream &out, LogColor, const char *fmt, ...)
...) { {
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
out << FormatString(fmt, args); out << FormatString(fmt, args);
va_end(args); va_end(args);
} }
static std::string FormatTime(double time)
static std::string FormatTime(double time) { {
// Align decimal places... // Align decimal places...
if (time < 1.0) { if (time < 1.0)
return FormatString("%10.3f", time); {
} return FormatString("%10.3f", time);
if (time < 10.0) { }
return FormatString("%10.2f", time); if (time < 10.0)
} {
if (time < 100.0) { return FormatString("%10.2f", time);
return FormatString("%10.1f", time); }
} if (time < 100.0)
return FormatString("%10.0f", time); {
return FormatString("%10.1f", time);
}
return FormatString("%10.0f", time);
} }
void ConsoleReporter::PrintRunData(const Run& result) { void ConsoleReporter::PrintRunData(const Run &result)
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); {
auto& Out = GetOutputStream(); typedef void(PrinterFn)(std::ostream &, LogColor, const char *, ...);
PrinterFn* printer = (output_options_ & OO_Color) ? auto &Out = GetOutputStream();
(PrinterFn*)ColorPrintf : IgnoreColorPrint; PrinterFn *printer = (output_options_ & OO_Color) ? (PrinterFn *)ColorPrintf : IgnoreColorPrint;
auto name_color = auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN; printer(Out, name_color, "%-*s ", name_field_width_, result.benchmark_name().c_str());
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name().c_str()); if (result.error_occurred)
{
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o)
{
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), cpu_time, big_o.c_str());
}
else if (result.report_rms)
{
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", cpu_time * 100, "%");
}
else
{
const char *timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, cpu_time_str.c_str(),
timeLabel);
}
if (!result.report_big_o && !result.report_rms)
{
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto &c : result.counters)
{
const std::size_t cNameLen = std::max(std::string::size_type(10), c.first.length());
auto const &s = HumanReadableNumber(c.second.value, c.second.oneK);
const char *unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular)
{
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), unit);
}
else
{
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty())
{
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n"); printer(Out, COLOR_DEFAULT, "\n");
return;
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
cpu_time * 100, "%");
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
cpu_time_str.c_str(), timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto& c : result.counters) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
const char* unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular) {
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
unit);
} else {
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty()) {
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
printer(Out, COLOR_DEFAULT, "\n");
} }
} // end namespace benchmark } // end namespace benchmark
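FormatTime's threshold ladder exists purely to keep decimal points aligned in the 10-character time columns. A standalone sketch with the same thresholds (sample values are made up):

#include <cstdio>

// Mirrors FormatTime's precision thresholds for a 10-char column.
static void Print(double t)
{
    if (t < 1.0)
        std::printf("%10.3f\n", t);
    else if (t < 10.0)
        std::printf("%10.2f\n", t);
    else if (t < 100.0)
        std::printf("%10.1f\n", t);
    else
        std::printf("%10.0f\n", t);
}

int main()
{
    for (double t : {0.123, 4.56, 78.9, 1234.0})
        Print(t); // decimal points line up down the column
}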

View File

@ -14,67 +14,85 @@
#include "counter.h" #include "counter.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
double Finish(Counter const& c, IterationCount iterations, double cpu_time, double Finish(Counter const &c, IterationCount iterations, double cpu_time, double num_threads)
double num_threads) { {
double v = c.value; double v = c.value;
if (c.flags & Counter::kIsRate) { if (c.flags & Counter::kIsRate)
v /= cpu_time; {
} v /= cpu_time;
if (c.flags & Counter::kAvgThreads) {
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant) {
v *= iterations;
}
if (c.flags & Counter::kAvgIterations) {
v /= iterations;
}
if (c.flags & Counter::kInvert) { // Invert is *always* last.
v = 1.0 / v;
}
return v;
}
void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
double num_threads) {
for (auto& c : *l) {
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
void Increment(UserCounters* l, UserCounters const& r) {
// add counters present in both or just in *l
for (auto& c : *l) {
auto it = r.find(c.first);
if (it != r.end()) {
c.second.value = c.second + it->second;
} }
} if (c.flags & Counter::kAvgThreads)
// add counters present in r, but not in *l {
for (auto const& tc : r) { v /= num_threads;
auto it = l->find(tc.first);
if (it == l->end()) {
(*l)[tc.first] = tc.second;
} }
} if (c.flags & Counter::kIsIterationInvariant)
{
v *= iterations;
}
if (c.flags & Counter::kAvgIterations)
{
v /= iterations;
}
if (c.flags & Counter::kInvert)
{ // Invert is *always* last.
v = 1.0 / v;
}
return v;
} }
bool SameNames(UserCounters const& l, UserCounters const& r) { void Finish(UserCounters *l, IterationCount iterations, double cpu_time, double num_threads)
if (&l == &r) return true; {
if (l.size() != r.size()) { for (auto &c : *l)
return false; {
} c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
for (auto const& c : l) {
if (r.find(c.first) == r.end()) {
return false;
} }
}
return true;
} }
} // end namespace internal void Increment(UserCounters *l, UserCounters const &r)
} // end namespace benchmark {
// add counters present in both or just in *l
for (auto &c : *l)
{
auto it = r.find(c.first);
if (it != r.end())
{
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
for (auto const &tc : r)
{
auto it = l->find(tc.first);
if (it == l->end())
{
(*l)[tc.first] = tc.second;
}
}
}
bool SameNames(UserCounters const &l, UserCounters const &r)
{
if (&l == &r)
return true;
if (l.size() != r.size())
{
return false;
}
for (auto const &c : l)
{
if (r.find(c.first) == r.end())
{
return false;
}
}
return true;
}
} // end namespace internal
} // end namespace benchmark
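The flag transforms in Finish() compose in a fixed order: rate first, then thread averaging, then iteration scaling, with inversion always last. A standalone sketch with made-up numbers (only a subset of the flags, reimplemented for illustration):

#include <cstdio>

enum Flags
{
    kIsRate = 1 << 0,
    kAvgThreads = 1 << 1,
    kInvert = 1 << 2
};

// Mirrors the order of operations in internal::Finish().
static double FinishSketch(double v, int flags, double cpu_time, double threads)
{
    if (flags & kIsRate)
        v /= cpu_time;
    if (flags & kAvgThreads)
        v /= threads;
    if (flags & kInvert) // invert is always last
        v = 1.0 / v;
    return v;
}

int main()
{
    // 4096 bytes processed in 0.5s of CPU time across 2 threads:
    double per_thread_rate = FinishSketch(4096, kIsRate | kAvgThreads, 0.5, 2);
    std::printf("%.0f bytes/s per thread\n", per_thread_rate); // 4096/0.5/2 = 4096
    // Inverting a rate yields seconds per item:
    std::printf("%.6f s/item\n", FinishSketch(4096, kIsRate | kInvert, 0.5, 2));
}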

View File

@ -17,16 +17,17 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
{
// these counter-related functions are hidden to reduce API surface. // these counter-related functions are hidden to reduce API surface.
namespace internal { namespace internal
void Finish(UserCounters* l, IterationCount iterations, double time, {
double num_threads); void Finish(UserCounters *l, IterationCount iterations, double time, double num_threads);
void Increment(UserCounters* l, UserCounters const& r); void Increment(UserCounters *l, UserCounters const &r);
bool SameNames(UserCounters const& l, UserCounters const& r); bool SameNames(UserCounters const &l, UserCounters const &r);
} // end namespace internal } // end namespace internal
} // end namespace benchmark } // end namespace benchmark
#endif // BENCHMARK_COUNTER_H_ #endif // BENCHMARK_COUNTER_H_

View File

@ -28,127 +28,159 @@
// File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark
{
namespace
{
std::vector<std::string> elements = {"name", "iterations", "real_time", "cpu_time",
                                     "time_unit", "bytes_per_second", "items_per_second", "label",
                                     "error_occurred", "error_message"};
} // namespace
std::string CsvEscape(const std::string &s)
{
    std::string tmp;
    tmp.reserve(s.size() + 2);
    for (char c : s)
    {
        switch (c)
        {
        case '"':
            tmp += "\"\"";
            break;
        default:
            tmp += c;
            break;
        }
    }
    return '"' + tmp + '"';
}
bool CSVReporter::ReportContext(const Context &context)
{
    PrintBasicContext(&GetErrorStream(), context);
    return true;
}
void CSVReporter::ReportRuns(const std::vector<Run> &reports)
{
    std::ostream &Out = GetOutputStream();
    if (!printed_header_)
    {
        // save the names of all the user counters
        for (const auto &run : reports)
        {
            for (const auto &cnt : run.counters)
            {
                if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
                    continue;
                user_counter_names_.insert(cnt.first);
            }
        }
        // print the header
        for (auto B = elements.begin(); B != elements.end();)
        {
            Out << *B++;
            if (B != elements.end())
                Out << ",";
        }
        for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();)
        {
            Out << ",\"" << *B++ << "\"";
        }
        Out << "\n";
        printed_header_ = true;
    }
    else
    {
        // check that all the current counters are saved in the name set
        for (const auto &run : reports)
        {
            for (const auto &cnt : run.counters)
            {
                if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
                    continue;
                CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
                    << "All counters must be present in each run. "
                    << "Counter named \"" << cnt.first << "\" was not in a run after being added to the header";
            }
        }
    }
    // print results for each run
    for (const auto &run : reports)
    {
        PrintRunData(run);
    }
}
void CSVReporter::PrintRunData(const Run &run)
{
    std::ostream &Out = GetOutputStream();
    Out << CsvEscape(run.benchmark_name()) << ",";
    if (run.error_occurred)
    {
        Out << std::string(elements.size() - 3, ',');
        Out << "true,";
        Out << CsvEscape(run.error_message) << "\n";
        return;
    }
    // Do not print iteration on bigO and RMS report
    if (!run.report_big_o && !run.report_rms)
    {
        Out << run.iterations;
    }
    Out << ",";
    Out << run.GetAdjustedRealTime() << ",";
    Out << run.GetAdjustedCPUTime() << ",";
    // Do not print timeLabel on bigO and RMS report
    if (run.report_big_o)
    {
        Out << GetBigOString(run.complexity);
    }
    else if (!run.report_rms)
    {
        Out << GetTimeUnitString(run.time_unit);
    }
    Out << ",";
    if (run.counters.find("bytes_per_second") != run.counters.end())
    {
        Out << run.counters.at("bytes_per_second");
    }
    Out << ",";
    if (run.counters.find("items_per_second") != run.counters.end())
    {
        Out << run.counters.at("items_per_second");
    }
    Out << ",";
    if (!run.report_label.empty())
    {
        Out << CsvEscape(run.report_label);
    }
    Out << ",,"; // for error_occurred and error_message
    // Print user counters
    for (const auto &ucn : user_counter_names_)
    {
        auto it = run.counters.find(ucn);
        if (it == run.counters.end())
        {
            Out << ",";
        }
        else
        {
            Out << "," << it->second;
        }
    }
    Out << '\n';
}
} // end namespace benchmark
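CsvEscape applies the standard CSV quoting rule: wrap the field in double quotes and double any embedded quote. A standalone check (reimplemented for illustration, with made-up inputs):

#include <iostream>
#include <string>

// Mirrors CsvEscape: wrap in quotes, doubling embedded quotes.
static std::string Escape(const std::string &s)
{
    std::string tmp;
    tmp.reserve(s.size() + 2);
    for (char c : s)
        tmp += (c == '"') ? std::string("\"\"") : std::string(1, c);
    return '"' + tmp + '"';
}

int main()
{
    std::cout << Escape("plain") << '\n';           // "plain"
    std::cout << Escape("say \"hi\", bye") << '\n'; // "say ""hi"", bye"
}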

View File

@ -50,148 +50,151 @@ extern "C" uint64_t __rdtsc();
#include <emscripten.h>
#endif
namespace benchmark
{
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock
{
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now()
{
#if defined(BENCHMARK_OS_MACOSX)
    // this goes at the top because we need ALL Macs, regardless of
    // architecture, to return the number of "mach time units" that
    // have passed since startup. See sysinfo.cc where
    // InitializeSystemInfo() sets the supposed cpu clock frequency of
    // macs to the number of mach time units per second, not actual
    // CPU clock frequency (which can change in the face of CPU
    // frequency scaling). Also note that when the Mac sleeps, this
    // counter pauses; it does not continue counting, nor does it
    // reset to zero.
    return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
    // this goes above x86-specific code because old versions of Emscripten
    // define __x86_64__, although they have nothing to do with it.
    return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
    int64_t ret;
    __asm__ volatile("rdtsc" : "=A"(ret));
    return ret;
#elif defined(__x86_64__) || defined(__amd64__)
    uint64_t low, high;
    __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
    return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
    // This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
    int64_t tb;
    asm volatile("mfspr %0, 268" : "=r"(tb));
    return tb;
#else
    uint32_t tbl, tbu0, tbu1;
    asm volatile("mftbu %0\n"
                 "mftbl %1\n"
                 "mftbu %2"
                 : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
    tbl &= -static_cast<int32_t>(tbu0 == tbu1);
    // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
    return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
    int64_t tick;
    asm(".byte 0x83, 0x41, 0x00, 0x00");
    asm("mov %%g1, %0" : "=r"(tick));
    return tick;
#elif defined(__ia64__)
    int64_t itc;
    asm("mov %0 = ar.itc" : "=r"(itc));
    return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
    // Older MSVC compilers (like 7.x) don't seem to support the
    // __rdtsc intrinsic properly, so I prefer to use _asm instead
    // when I know it will work. Otherwise, I'll use __rdtsc and hope
    // the code is being compiled with a non-ancient compiler.
    _asm rdtsc
#elif defined(COMPILER_MSVC)
    return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
    // Native Client validator on x86/x86-64 allows RDTSC instructions,
    // and this case is handled above. Native Client validator on ARM
    // rejects MRC instructions (used in the ARM-specific sequence below),
    // so we handle it here. Portable Native Client compiles to
    // architecture-agnostic bytecode, which doesn't provide any
    // cycle counter access mnemonics.
    // Native Client does not provide any API to access cycle counter.
    // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
    // because it provides nanosecond resolution (which is noticeable at
    // least for PNaCl modules running on x86 Mac & Linux).
    // Initialize to always return 0 if clock_gettime fails.
    struct timespec ts = {0, 0};
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
    // System timer of ARMv8 runs at a different frequency than the CPU's.
    // The frequency is fixed, typically in the range 1-50MHz. It can be
    // read at CNTFRQ special register. We assume the OS has set up
    // the virtual timer properly.
    int64_t virtual_timer_value;
    asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
    return virtual_timer_value;
#elif defined(__ARM_ARCH)
    // V6 is the earliest arch that has a standard cyclecount
    // Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
    uint32_t pmccntr;
    uint32_t pmuseren;
    uint32_t pmcntenset;
    // Read the user mode perf monitor counter access permissions.
    asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
    if (pmuseren & 1)
    { // Allows reading perfmon counters for user mode code.
        asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
        if (pmcntenset & 0x80000000ul)
        { // Is it counting?
            asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
            // The counter is set up to count every 64th cycle
            return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
        }
    }
#endif
    struct timeval tv;
    gettimeofday(&tv, nullptr);
    return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__)
    // mips apparently only allows rdtsc for superusers, so we fall
    // back to gettimeofday. It's possible clock_gettime would be better.
    struct timeval tv;
    gettimeofday(&tv, nullptr);
    return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
    // Return the CPU clock.
    uint64_t tsc;
    asm("stck %0" : "=Q"(tsc) : : "cc");
    return tsc;
#elif defined(__riscv) // RISC-V
    // Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
    uint32_t cycles_lo, cycles_hi0, cycles_hi1;
    // This asm also includes the PowerPC overflow handling strategy, as above.
    // Implemented in assembly because Clang insisted on branching.
    asm volatile("rdcycleh %0\n"
                 "rdcycle %1\n"
                 "rdcycleh %2\n"
                 "sub %0, %0, %2\n"
                 "seqz %0, %0\n"
                 "sub %0, zero, %0\n"
                 "and %1, %1, %0\n"
                 : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
    return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
    uint64_t cycles;
    asm volatile("rdcycle %0" : "=r"(cycles));
    return cycles;
#endif
#else
// The soft failover to a generic implementation is automatic only for ARM.
@ -200,7 +203,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#error You need to define CycleTimer for your OS and CPU
#endif
}
} // end namespace cycleclock
} // end namespace benchmark
#endif // BENCHMARK_CYCLECLOCK_H_
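As a rough usage sketch for the x86-64 branch above (assumes a GCC/Clang x86-64 target where <x86intrin.h> provides __rdtsc; raw TSC ticks are only comparable on the same core and are affected by frequency scaling):

#include <cstdio>
#include <x86intrin.h>

int main()
{
    unsigned long long start = __rdtsc(); // same TSC the rdtsc asm above reads
    volatile double sink = 0;
    for (int i = 0; i < 1000000; ++i)
        sink += i * 0.5;
    unsigned long long elapsed = __rdtsc() - start;
    std::printf("~%llu TSC ticks\n", elapsed);
    (void)sink;
}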

View File

@ -91,4 +91,4 @@
// clang-format on
#endif // BENCHMARK_INTERNAL_MACROS_H_

View File

@ -18,7 +18,7 @@
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iomanip> // for setprecision
#include <iostream>
#include <limits>
#include <string>
@ -28,226 +28,267 @@
#include "string_util.h" #include "string_util.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
{
namespace { namespace
{
std::string StrEscape(const std::string & s) { std::string StrEscape(const std::string &s)
std::string tmp; {
tmp.reserve(s.size()); std::string tmp;
for (char c : s) { tmp.reserve(s.size());
switch (c) { for (char c : s)
case '\b': tmp += "\\b"; break; {
case '\f': tmp += "\\f"; break; switch (c)
case '\n': tmp += "\\n"; break; {
case '\r': tmp += "\\r"; break; case '\b':
case '\t': tmp += "\\t"; break; tmp += "\\b";
case '\\': tmp += "\\\\"; break; break;
case '"' : tmp += "\\\""; break; case '\f':
default : tmp += c; break; tmp += "\\f";
break;
case '\n':
tmp += "\\n";
break;
case '\r':
tmp += "\\r";
break;
case '\t':
tmp += "\\t";
break;
case '\\':
tmp += "\\\\";
break;
case '"':
tmp += "\\\"";
break;
default:
tmp += c;
break;
}
} }
} return tmp;
return tmp;
} }
std::string FormatKV(std::string const& key, std::string const& value) { std::string FormatKV(std::string const &key, std::string const &value)
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); {
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
} }
std::string FormatKV(std::string const& key, const char* value) { std::string FormatKV(std::string const &key, const char *value)
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); {
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
} }
std::string FormatKV(std::string const& key, bool value) { std::string FormatKV(std::string const &key, bool value)
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false"); {
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
} }
std::string FormatKV(std::string const& key, int64_t value) { std::string FormatKV(std::string const &key, int64_t value)
std::stringstream ss; {
ss << '"' << StrEscape(key) << "\": " << value; std::stringstream ss;
return ss.str(); ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
} }
std::string FormatKV(std::string const& key, IterationCount value) { std::string FormatKV(std::string const &key, IterationCount value)
std::stringstream ss; {
ss << '"' << StrEscape(key) << "\": " << value; std::stringstream ss;
return ss.str(); ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
} }
std::string FormatKV(std::string const& key, double value) { std::string FormatKV(std::string const &key, double value)
std::stringstream ss; {
ss << '"' << StrEscape(key) << "\": "; std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
if (std::isnan(value)) if (std::isnan(value))
ss << (value < 0 ? "-" : "") << "NaN"; ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value)) else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity"; ss << (value < 0 ? "-" : "") << "Infinity";
else { else
const auto max_digits10 = {
std::numeric_limits<decltype(value)>::max_digits10; const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1; const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10) ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
<< value; }
} return ss.str();
return ss.str();
} }
int64_t RoundDouble(double v) { return std::lround(v); } int64_t RoundDouble(double v)
{
return std::lround(v);
}
} // end namespace } // end namespace
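The double overload above uses max_digits10 so the JSON text can round-trip the exact binary value; scientific notation with max_digits10 - 1 fractional digits prints every significant digit. A standalone illustration (hypothetical value):

#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>

int main()
{
    double value = 0.1 + 0.2; // not exactly 0.3 in binary floating point
    // max_digits10 (17 for double) guarantees a lossless text round-trip;
    // one leading digit plus 16 fractional digits prints all of them.
    std::ostringstream ss;
    ss << std::scientific << std::setprecision(std::numeric_limits<double>::max_digits10 - 1) << value;
    std::cout << ss.str() << '\n'; // 3.0000000000000004e-01
}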
bool JSONReporter::ReportContext(const Context &context)
{
    std::ostream &out = GetOutputStream();
    out << "{\n";
    std::string inner_indent(2, ' ');
    // Open context block and print context information.
    out << inner_indent << "\"context\": {\n";
    std::string indent(4, ' ');
    std::string walltime_value = LocalDateTimeString();
    out << indent << FormatKV("date", walltime_value) << ",\n";
    out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
    if (Context::executable_name)
    {
        out << indent << FormatKV("executable", Context::executable_name) << ",\n";
    }
    CPUInfo const &info = context.cpu_info;
    out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus)) << ",\n";
    out << indent << FormatKV("mhz_per_cpu", RoundDouble(info.cycles_per_second / 1000000.0)) << ",\n";
    out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) << ",\n";
    out << indent << "\"caches\": [\n";
    indent = std::string(6, ' ');
    std::string cache_indent(8, ' ');
    for (size_t i = 0; i < info.caches.size(); ++i)
    {
        auto &CI = info.caches[i];
        out << indent << "{\n";
        out << cache_indent << FormatKV("type", CI.type) << ",\n";
        out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level)) << ",\n";
        out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
        out << cache_indent << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing)) << "\n";
        out << indent << "}";
        if (i != info.caches.size() - 1)
            out << ",";
        out << "\n";
    }
    indent = std::string(4, ' ');
    out << indent << "],\n";
    out << indent << "\"load_avg\": [";
    for (auto it = info.load_avg.begin(); it != info.load_avg.end();)
    {
        out << *it++;
        if (it != info.load_avg.end())
            out << ",";
    }
    out << "],\n";
#if defined(NDEBUG)
    const char build_type[] = "release";
#else
    const char build_type[] = "debug";
#endif
    out << indent << FormatKV("library_build_type", build_type) << "\n";
    // Close context block and open the list of benchmarks.
    out << inner_indent << "},\n";
    out << inner_indent << "\"benchmarks\": [\n";
    return true;
}
void JSONReporter::ReportRuns(std::vector<Run> const &reports)
{
    if (reports.empty())
    {
        return;
    }
    std::string indent(4, ' ');
    std::ostream &out = GetOutputStream();
    if (!first_report_)
    {
        out << ",\n";
    }
    first_report_ = false;
    for (auto it = reports.begin(); it != reports.end(); ++it)
    {
        out << indent << "{\n";
        PrintRunData(*it);
        out << indent << '}';
        auto it_cp = it;
        if (++it_cp != reports.end())
        {
            out << ",\n";
        }
    }
}
void JSONReporter::Finalize()
{
    // Close the list of benchmarks and the top level object.
    GetOutputStream() << "\n  ]\n}\n";
}
void JSONReporter::PrintRunData(Run const &run)
{
    std::string indent(6, ' ');
    std::ostream &out = GetOutputStream();
    out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
    out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char* {
switch (run.run_type) {
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
return "aggregate";
} }
BENCHMARK_UNREACHABLE(); first_report_ = false;
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("repetition_index", run.repetition_index)
<< ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred) {
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
<< ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) {
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto& c : run.counters) { for (auto it = reports.begin(); it != reports.end(); ++it)
out << ",\n" << indent << FormatKV(c.first, c.second); {
} out << indent << "{\n";
PrintRunData(*it);
if (run.has_memory_result) { out << indent << '}';
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); auto it_cp = it;
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used); if (++it_cp != reports.end())
} {
out << ",\n";
if (!run.report_label.empty()) { }
out << ",\n" << indent << FormatKV("label", run.report_label); }
}
out << '\n';
} }
} // end namespace benchmark void JSONReporter::Finalize()
{
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const &run)
{
std::string indent(6, ' ');
std::ostream &out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char * {
switch (run.run_type)
{
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
return "aggregate";
}
BENCHMARK_UNREACHABLE();
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("repetition_index", run.repetition_index) << ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred)
{
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms)
{
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_big_o)
{
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_rms)
{
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto &c : run.counters)
{
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.has_memory_result)
{
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
if (!run.report_label.empty())
{
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
}
} // end namespace benchmark
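A minimal sketch of how the JSONReporter above is exercised; BENCHMARK, BENCHMARK_MAIN, and the --benchmark_format=json flag are upstream Google Benchmark entry points, while the benchmark body itself is purely illustrative:

// Sketch: a trivial benchmark whose output is produced by JSONReporter
// when the binary is run with --benchmark_format=json.
#include "benchmark/benchmark.h"

static void BM_Noop(benchmark::State &state)
{
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(0); // keep the empty loop from being elided
    }
}
BENCHMARK(BM_Noop);
BENCHMARK_MAIN();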
View File
@ -6,65 +6,77 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&); typedef std::basic_ostream<char> &(EndLType)(std::basic_ostream<char> &);
class LogType { class LogType
friend LogType& GetNullLogInstance(); {
friend LogType& GetErrorLogInstance(); friend LogType &GetNullLogInstance();
friend LogType &GetErrorLogInstance();
// FIXME: Add locking to output. // FIXME: Add locking to output.
template <class Tp> template <class Tp> friend LogType &operator<<(LogType &, Tp const &);
friend LogType& operator<<(LogType&, Tp const&); friend LogType &operator<<(LogType &, EndLType *);
friend LogType& operator<<(LogType&, EndLType*);
private: private:
LogType(std::ostream* out) : out_(out) {} LogType(std::ostream *out) : out_(out)
std::ostream* out_; {
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); }
std::ostream *out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
}; };
template <class Tp> template <class Tp> LogType &operator<<(LogType &log, Tp const &value)
LogType& operator<<(LogType& log, Tp const& value) { {
if (log.out_) { if (log.out_)
*log.out_ << value; {
} *log.out_ << value;
return log; }
return log;
} }
inline LogType& operator<<(LogType& log, EndLType* m) { inline LogType &operator<<(LogType &log, EndLType *m)
if (log.out_) { {
*log.out_ << m; if (log.out_)
} {
return log; *log.out_ << m;
}
return log;
} }
inline int& LogLevel() { inline int &LogLevel()
static int log_level = 0; {
return log_level; static int log_level = 0;
return log_level;
} }
inline LogType& GetNullLogInstance() { inline LogType &GetNullLogInstance()
static LogType log(nullptr); {
return log; static LogType log(nullptr);
return log;
} }
inline LogType& GetErrorLogInstance() { inline LogType &GetErrorLogInstance()
static LogType log(&std::clog); {
return log; static LogType log(&std::clog);
return log;
} }
inline LogType& GetLogInstanceForLevel(int level) { inline LogType &GetLogInstanceForLevel(int level)
if (level <= LogLevel()) { {
return GetErrorLogInstance(); if (level <= LogLevel())
} {
return GetNullLogInstance(); return GetErrorLogInstance();
}
return GetNullLogInstance();
} }
} // end namespace internal } // end namespace internal
} // end namespace benchmark } // end namespace benchmark
// clang-format off // clang-format off
#define VLOG(x) \ #define VLOG(x) \
View File
@ -11,7 +11,7 @@
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif

#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))

@ -22,49 +22,38 @@

#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))

#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))

#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))

#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))

#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))

#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))

#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))

#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))

#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))

#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))

#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))

#define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))

#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))

#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)

namespace benchmark
{

typedef std::condition_variable Condition;

@ -72,84 +61,114 @@ typedef std::condition_variable Condition;

// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex
{
  public:
    Mutex()
    {
    }

    void lock() ACQUIRE()
    {
        mut_.lock();
    }
    void unlock() RELEASE()
    {
        mut_.unlock();
    }
    std::mutex &native_handle()
    {
        return mut_;
    }

  private:
    std::mutex mut_;
};

class SCOPED_CAPABILITY MutexLock
{
    typedef std::unique_lock<std::mutex> MutexLockImp;

  public:
    MutexLock(Mutex &m) ACQUIRE(m) : ml_(m.native_handle())
    {
    }
    ~MutexLock() RELEASE()
    {
    }
    MutexLockImp &native_handle()
    {
        return ml_;
    }

  private:
    MutexLockImp ml_;
};

class Barrier
{
  public:
    Barrier(int num_threads) : running_threads_(num_threads)
    {
    }

    // Called by each thread
    bool wait() EXCLUDES(lock_)
    {
        bool last_thread = false;
        {
            MutexLock ml(lock_);
            last_thread = createBarrier(ml);
        }
        if (last_thread)
            phase_condition_.notify_all();
        return last_thread;
    }

    void removeThread() EXCLUDES(lock_)
    {
        MutexLock ml(lock_);
        --running_threads_;
        if (entered_ != 0)
            phase_condition_.notify_all();
    }

  private:
    Mutex lock_;
    Condition phase_condition_;
    int running_threads_;

    // State for barrier management
    int phase_number_ = 0;
    int entered_ = 0; // Number of threads that have entered this barrier

    // Enter the barrier and wait until all other threads have also
    // entered the barrier. Returns iff this is the last thread to
    // enter the barrier.
    bool createBarrier(MutexLock &ml) REQUIRES(lock_)
    {
        CHECK_LT(entered_, running_threads_);
        entered_++;
        if (entered_ < running_threads_)
        {
            // Wait for all threads to enter
            int phase_number_cp = phase_number_;
            auto cb = [this, phase_number_cp]() {
                return this->phase_number_ > phase_number_cp ||
                       entered_ == running_threads_; // A thread has aborted in error
            };
            phase_condition_.wait(ml.native_handle(), cb);
            if (phase_number_ > phase_number_cp)
                return false;
            // else (running_threads_ == entered_) and we are the last thread.
        }
        // Last thread has reached the barrier
        phase_number_++;
        entered_ = 0;
        return true;
    }
};

} // end namespace benchmark

#endif // BENCHMARK_MUTEX_H_
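A hedged usage sketch of the Barrier above (assumes this header is included; the worker body and thread count are illustrative, not part of the library):

// Sketch: num_threads workers rendezvous once; wait() returns true in
// exactly one thread (the last to arrive), which can do one-time work.
#include <thread>
#include <vector>

void RendezvousExample(int num_threads)
{
    benchmark::Barrier barrier(num_threads);
    std::vector<std::thread> workers;
    for (int i = 0; i < num_threads; ++i)
    {
        workers.emplace_back([&barrier] {
            if (barrier.wait())
            {
                // Only the last thread through executes this block.
            }
            // All threads proceed together from here.
        });
    }
    for (auto &t : workers)
        t.join();
}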
View File
@ -54,32 +54,36 @@
#include "check.h" #include "check.h"
namespace benchmark { namespace benchmark
{
// A wrapper around the POSIX regular expression API that provides automatic // A wrapper around the POSIX regular expression API that provides automatic
// cleanup // cleanup
class Regex { class Regex
public: {
Regex() : init_(false) {} public:
Regex() : init_(false)
{
}
~Regex(); ~Regex();
// Compile a regular expression matcher from spec. Returns true on success. // Compile a regular expression matcher from spec. Returns true on success.
// //
// On failure (and if error is not nullptr), error is populated with a human // On failure (and if error is not nullptr), error is populated with a human
// readable error message if an error occurs. // readable error message if an error occurs.
bool Init(const std::string& spec, std::string* error); bool Init(const std::string &spec, std::string *error);
// Returns whether str matches the compiled regular expression. // Returns whether str matches the compiled regular expression.
bool Match(const std::string& str); bool Match(const std::string &str);
private: private:
bool init_; bool init_;
// Underlying regular expression object // Underlying regular expression object
#if defined(HAVE_STD_REGEX) #if defined(HAVE_STD_REGEX)
std::regex re_; std::regex re_;
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) #elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_; regex_t re_;
#else #else
#error No regular expression backend implementation available #error No regular expression backend implementation available
#endif #endif
@ -87,72 +91,87 @@ class Regex {
#if defined(HAVE_STD_REGEX) #if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string& spec, std::string* error) { inline bool Regex::Init(const std::string &spec, std::string *error)
{
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS #ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning ((void)error); // suppress unused warning
#else #else
try { try
{
#endif #endif
re_ = std::regex(spec, std::regex_constants::extended); re_ = std::regex(spec, std::regex_constants::extended);
init_ = true; init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS #ifndef BENCHMARK_HAS_NO_EXCEPTIONS
} }
catch (const std::regex_error& e) { catch (const std::regex_error &e)
if (error) { {
*error = e.what(); if (error)
} {
*error = e.what();
}
} }
#endif #endif
return init_; return init_;
} }
inline Regex::~Regex() {} inline Regex::~Regex()
{
}
inline bool Regex::Match(const std::string& str) { inline bool Regex::Match(const std::string &str)
if (!init_) { {
return false; if (!init_)
} {
return std::regex_search(str, re_); return false;
}
return std::regex_search(str, re_);
} }
#else #else
inline bool Regex::Init(const std::string& spec, std::string* error) { inline bool Regex::Init(const std::string &spec, std::string *error)
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); {
if (ec != 0) { int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (error) { if (ec != 0)
size_t needed = regerror(ec, &re_, nullptr, 0); {
char* errbuf = new char[needed]; if (error)
regerror(ec, &re_, errbuf, needed); {
size_t needed = regerror(ec, &re_, nullptr, 0);
char *errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
// regerror returns the number of bytes necessary to null terminate // regerror returns the number of bytes necessary to null terminate
// the string, so we move that when assigning to error. // the string, so we move that when assigning to error.
CHECK_NE(needed, 0); CHECK_NE(needed, 0);
error->assign(errbuf, needed - 1); error->assign(errbuf, needed - 1);
delete[] errbuf; delete[] errbuf;
}
return false;
} }
return false; init_ = true;
} return true;
init_ = true;
return true;
} }
inline Regex::~Regex() { inline Regex::~Regex()
if (init_) { {
regfree(&re_); if (init_)
} {
regfree(&re_);
}
} }
inline bool Regex::Match(const std::string& str) { inline bool Regex::Match(const std::string &str)
if (!init_) { {
return false; if (!init_)
} {
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
} }
#endif #endif
} // end namespace benchmark } // end namespace benchmark
#endif // BENCHMARK_RE_H_ #endif // BENCHMARK_RE_H_
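A brief usage sketch of the wrapper (assumes this header is included; the function name and filter semantics are illustrative):

// Sketch: compile-and-match against the Regex wrapper above; on a failed
// compile, `error` receives the backend's human-readable message.
#include <string>

bool MatchesBenchmarkFilter(const std::string &filter, const std::string &name)
{
    benchmark::Regex re;
    std::string error;
    if (!re.Init(filter, &error))
    {
        // Compilation failed; `error` describes why.
        return false;
    }
    return re.Match(name);
}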
View File
@ -24,82 +24,97 @@
#include "check.h" #include "check.h"
#include "string_util.h" #include "string_util.h"
namespace benchmark { namespace benchmark
{
BenchmarkReporter::BenchmarkReporter() BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr)
: output_stream_(&std::cout), error_stream_(&std::cerr) {} {
}
BenchmarkReporter::~BenchmarkReporter() {} BenchmarkReporter::~BenchmarkReporter()
{
}
void BenchmarkReporter::PrintBasicContext(std::ostream *out, void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context)
Context const &context) { {
CHECK(out) << "cannot be null"; CHECK(out) << "cannot be null";
auto &Out = *out; auto &Out = *out;
Out << LocalDateTimeString() << "\n"; Out << LocalDateTimeString() << "\n";
if (context.executable_name) if (context.executable_name)
Out << "Running " << context.executable_name << "\n"; Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info; const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X " Out << "Run on (" << info.num_cpus << " X " << (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU " << ((info.num_cpus > 1) ? "s" : "") << ")\n";
<< ((info.num_cpus > 1) ? "s" : "") << ")\n"; if (info.caches.size() != 0)
if (info.caches.size() != 0) { {
Out << "CPU Caches:\n"; Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) { for (auto &CInfo : info.caches)
Out << " L" << CInfo.level << " " << CInfo.type << " " {
<< (CInfo.size / 1024) << " KiB"; Out << " L" << CInfo.level << " " << CInfo.type << " " << (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0) if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n"; Out << "\n";
}
} }
} if (!info.load_avg.empty())
if (!info.load_avg.empty()) { {
Out << "Load Average: "; Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();) { for (auto It = info.load_avg.begin(); It != info.load_avg.end();)
Out << StrFormat("%.2f", *It++); {
if (It != info.load_avg.end()) Out << ", "; Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end())
Out << ", ";
}
Out << "\n";
} }
Out << "\n";
}
if (info.scaling_enabled) { if (info.scaling_enabled)
Out << "***WARNING*** CPU scaling is enabled, the benchmark " {
"real time measurements may be noisy and will incur extra " Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"overhead.\n"; "real time measurements may be noisy and will incur extra "
} "overhead.\n";
}
#ifndef NDEBUG #ifndef NDEBUG
Out << "***WARNING*** Library was built as DEBUG. Timings may be " Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n"; "affected.\n";
#endif #endif
} }
// No initializer because it's already initialized to NULL. // No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name; const char *BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context() BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get())
: cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {} {
std::string BenchmarkReporter::Run::benchmark_name() const {
std::string name = run_name.str();
if (run_type == RT_Aggregate) {
name += "_" + aggregate_name;
}
return name;
} }
double BenchmarkReporter::Run::GetAdjustedRealTime() const { std::string BenchmarkReporter::Run::benchmark_name() const
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); {
if (iterations != 0) new_time /= static_cast<double>(iterations); std::string name = run_name.str();
return new_time; if (run_type == RT_Aggregate)
{
name += "_" + aggregate_name;
}
return name;
} }
double BenchmarkReporter::Run::GetAdjustedCPUTime() const { double BenchmarkReporter::Run::GetAdjustedRealTime() const
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); {
if (iterations != 0) new_time /= static_cast<double>(iterations); double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
return new_time; if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
} }
} // end namespace benchmark double BenchmarkReporter::Run::GetAdjustedCPUTime() const
{
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
View File
@ -24,28 +24,36 @@
#include <windows.h>
#endif

namespace benchmark
{
#ifdef BENCHMARK_OS_WINDOWS
// Window's Sleep takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds)
{
    Sleep(milliseconds);
}
void SleepForSeconds(double seconds)
{
    SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds)
{
    struct timespec sleep_time;
    sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
    sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
    while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
        ; // Ignore signals and wait for the full interval to elapse.
}

void SleepForMilliseconds(int milliseconds)
{
    SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}

void SleepForSeconds(double seconds)
{
    SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif // BENCHMARK_OS_WINDOWS
} // end namespace benchmark
View File
@ -1,7 +1,8 @@
#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_

namespace benchmark
{
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;

@ -10,6 +11,6 @@ const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;

void SleepForMilliseconds(int milliseconds);
void SleepForSeconds(double seconds);

} // end namespace benchmark

#endif // BENCHMARK_SLEEP_H_
View File
@ -15,179 +15,195 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "check.h"
#include "statistics.h"
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
#include <numeric> #include <numeric>
#include <string> #include <string>
#include <vector> #include <vector>
#include "check.h"
#include "statistics.h"
namespace benchmark { namespace benchmark
{
auto StatisticsSum = [](const std::vector<double>& v) { auto StatisticsSum = [](const std::vector<double> &v) { return std::accumulate(v.begin(), v.end(), 0.0); };
return std::accumulate(v.begin(), v.end(), 0.0);
};
double StatisticsMean(const std::vector<double>& v) { double StatisticsMean(const std::vector<double> &v)
if (v.empty()) return 0.0; {
return StatisticsSum(v) * (1.0 / v.size()); if (v.empty())
return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
} }
double StatisticsMedian(const std::vector<double>& v) { double StatisticsMedian(const std::vector<double> &v)
if (v.size() < 3) return StatisticsMean(v); {
std::vector<double> copy(v); if (v.size() < 3)
return StatisticsMean(v);
std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2; auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end()); std::nth_element(copy.begin(), center, copy.end());
// did we have an odd number of samples? // did we have an odd number of samples?
// if yes, then center is the median // if yes, then center is the median
// it no, then we are looking for the average between center and the value // it no, then we are looking for the average between center and the value
// before // before
if (v.size() % 2 == 1) return *center; if (v.size() % 2 == 1)
auto center2 = copy.begin() + v.size() / 2 - 1; return *center;
std::nth_element(copy.begin(), center2, copy.end()); auto center2 = copy.begin() + v.size() / 2 - 1;
return (*center + *center2) / 2.0; std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
} }
// Return the sum of the squares of this sample set // Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double>& v) { auto SumSquares = [](const std::vector<double> &v) { return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); };
return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
};
auto Sqr = [](const double dat) { return dat * dat; }; auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) { auto Sqrt = [](const double dat) {
// Avoid NaN due to imprecision in the calculations // Avoid NaN due to imprecision in the calculations
if (dat < 0.0) return 0.0; if (dat < 0.0)
return std::sqrt(dat); return 0.0;
return std::sqrt(dat);
}; };
double StatisticsStdDev(const std::vector<double>& v) { double StatisticsStdDev(const std::vector<double> &v)
const auto mean = StatisticsMean(v); {
if (v.empty()) return mean; const auto mean = StatisticsMean(v);
if (v.empty())
return mean;
// Sample standard deviation is undefined for n = 1 // Sample standard deviation is undefined for n = 1
if (v.size() == 1) return 0.0; if (v.size() == 1)
return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size()); const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean))); return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
} }
std::vector<BenchmarkReporter::Run> ComputeStats( std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports)
const std::vector<BenchmarkReporter::Run>& reports) { {
typedef BenchmarkReporter::Run Run; typedef BenchmarkReporter::Run Run;
std::vector<Run> results; std::vector<Run> results;
auto error_count = auto error_count = std::count_if(reports.begin(), reports.end(), [](Run const &run) { return run.error_occurred; });
std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.error_occurred; }); if (reports.size() - error_count < 2)
{
// We don't report aggregated data if there was a single run.
return results;
}
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat
{
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const &r : reports)
{
for (auto const &cnt : r.counters)
{
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end())
{
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
}
else
{
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const &run : reports)
{
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred)
continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const &cnt : run.counters)
{
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
}
}
// Only add label if it is same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++)
{
if (reports[i].report_label != report_label)
{
report_label = "";
break;
}
}
const double iteration_rescale_factor = double(reports.size()) / double(run_iterations);
for (const auto &Stat : *reports[0].statistics)
{
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
data.threads = reports[0].threads;
data.repetitions = reports[0].repetitions;
data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
// It is incorrect to say that an aggregate is computed over
// run's iterations, because those iterations already got averaged.
// Similarly, if there are N repetitions with 1 iterations each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
// We will divide these times by data.iterations when reporting, but the
// data.iterations is not nessesairly the scale of these measurements,
// because in each repetition, these timers are sum over all the iterations.
// And if we want to say that the stats are over N repetitions and not
// M iterations, we need to multiply these by (N/M).
data.real_accumulated_time *= iteration_rescale_factor;
data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const &kv : counter_stats)
{
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
results.push_back(data);
}
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
return results; return results;
}
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const& r : reports) {
for (auto const& cnt : r.counters) {
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end()) {
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
} else {
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const& cnt : run.counters) {
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
}
}
// Only add label if it is same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != report_label) {
report_label = "";
break;
}
}
const double iteration_rescale_factor =
double(reports.size()) / double(run_iterations);
for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
data.threads = reports[0].threads;
data.repetitions = reports[0].repetitions;
data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
// It is incorrect to say that an aggregate is computed over
// run's iterations, because those iterations already got averaged.
// Similarly, if there are N repetitions with 1 iterations each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
// We will divide these times by data.iterations when reporting, but the
// data.iterations is not nessesairly the scale of these measurements,
// because in each repetition, these timers are sum over all the iterations.
// And if we want to say that the stats are over N repetitions and not
// M iterations, we need to multiply these by (N/M).
data.real_accumulated_time *= iteration_rescale_factor;
data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const& kv : counter_stats) {
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
results.push_back(data);
}
return results;
} }
} // end namespace benchmark } // end namespace benchmark
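A small worked sketch of the three per-statistic aggregates above, applied to a hand-made sample (values illustrative; assumes statistics.h is included):

// Sketch: mean, median, and sample standard deviation as computed by the
// functions above.
#include <vector>

void StatsExample()
{
    std::vector<double> samples = {1.0, 2.0, 2.0, 3.0, 7.0};
    double mean = benchmark::StatisticsMean(samples);     // (1+2+2+3+7)/5 = 3.0
    double median = benchmark::StatisticsMedian(samples); // middle of sorted set = 2.0
    double stddev = benchmark::StatisticsStdDev(samples); // sqrt(22/4) ~= 2.345 (n-1 denominator)
    (void)mean;
    (void)median;
    (void)stddev;
}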
View File
@ -20,18 +20,18 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
{
// Return a vector containing the mean, median and standard devation information // Return a vector containing the mean, median and standard devation information
// (and any user-specified info) for the specified list of reports. If 'reports' // (and any user-specified info) for the specified list of reports. If 'reports'
// contains less than two non-errored runs an empty vector is returned // contains less than two non-errored runs an empty vector is returned
std::vector<BenchmarkReporter::Run> ComputeStats( std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports);
const std::vector<BenchmarkReporter::Run>& reports);
double StatisticsMean(const std::vector<double>& v); double StatisticsMean(const std::vector<double> &v);
double StatisticsMedian(const std::vector<double>& v); double StatisticsMedian(const std::vector<double> &v);
double StatisticsStdDev(const std::vector<double>& v); double StatisticsStdDev(const std::vector<double> &v);
} // end namespace benchmark } // end namespace benchmark
#endif // STATISTICS_H_ #endif // STATISTICS_H_
View File
@ -12,8 +12,10 @@
#include "arraysize.h" #include "arraysize.h"
namespace benchmark { namespace benchmark
namespace { {
namespace
{
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta. // kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY"; const char kBigSIUnits[] = "kMGTPEZY";
@ -23,144 +25,159 @@ const char kBigIECUnits[] = "KMGTPEZY";
const char kSmallSIUnits[] = "munpfazy"; const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size. // We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), "SI and IEC unit arrays must be the same size");
"SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size"); "Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits); static const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, double thresh, int precision, void ToExponentAndMantissa(double val, double thresh, int precision, double one_k, std::string *mantissa,
double one_k, std::string* mantissa, int64_t *exponent)
int64_t* exponent) { {
std::stringstream mantissa_stream; std::stringstream mantissa_stream;
if (val < 0) { if (val < 0)
mantissa_stream << "-"; {
val = -val; mantissa_stream << "-";
} val = -val;
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold =
std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in ]simple_threshold,small_threshold[ will be printed as-is
const double simple_threshold = 0.01;
if (val > big_threshold) {
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
scaled /= one_k;
if (scaled <= big_threshold) {
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
} }
mantissa_stream << val;
*exponent = 0; // Adjust threshold so that it never excludes things which can't be rendered
} else if (val < small_threshold) { // in 'precision' digits.
// Negative powers const double adjusted_threshold = std::max(thresh, 1.0 / std::pow(10.0, precision));
if (val < simple_threshold) { const double big_threshold = adjusted_threshold * one_k;
double scaled = val; const double small_threshold = adjusted_threshold;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { // Values in ]simple_threshold,small_threshold[ will be printed as-is
scaled *= one_k; const double simple_threshold = 0.01;
if (scaled >= small_threshold) {
mantissa_stream << scaled; if (val > big_threshold)
*exponent = -static_cast<int64_t>(i + 1); {
*mantissa = mantissa_stream.str(); // Positive powers
return; double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i)
{
scaled /= one_k;
if (scaled <= big_threshold)
{
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
} }
} mantissa_stream << val;
*exponent = 0;
} }
mantissa_stream << val; else if (val < small_threshold)
*exponent = 0; {
} else { // Negative powers
mantissa_stream << val; if (val < simple_threshold)
*exponent = 0; {
} double scaled = val;
*mantissa = mantissa_stream.str(); for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i)
{
scaled *= one_k;
if (scaled >= small_threshold)
{
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
}
}
}
mantissa_stream << val;
*exponent = 0;
}
else
{
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
} }
std::string ExponentToPrefix(int64_t exponent, bool iec) { std::string ExponentToPrefix(int64_t exponent, bool iec)
if (exponent == 0) return ""; {
if (exponent == 0)
return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return ""; if (index >= kUnitsSize)
return "";
const char* array = const char *array = (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits); if (iec)
if (iec) return array[index] + std::string("i");
return array[index] + std::string("i"); else
else return std::string(1, array[index]);
return std::string(1, array[index]);
} }
std::string ToBinaryStringFullySpecified(double value, double threshold, std::string ToBinaryStringFullySpecified(double value, double threshold, int precision, double one_k = 1024.0)
int precision, double one_k = 1024.0) { {
std::string mantissa; std::string mantissa;
int64_t exponent; int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, &exponent);
&exponent); return mantissa + ExponentToPrefix(exponent, false);
return mantissa + ExponentToPrefix(exponent, false);
} }
} // end namespace } // end namespace
void AppendHumanReadable(int n, std::string* str) { void AppendHumanReadable(int n, std::string *str)
std::stringstream ss; {
// Round down to the nearest SI prefix. std::stringstream ss;
ss << ToBinaryStringFullySpecified(n, 1.0, 0); // Round down to the nearest SI prefix.
*str += ss.str(); ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
} }
std::string HumanReadableNumber(double n, double one_k) { std::string HumanReadableNumber(double n, double one_k)
// 1.1 means that figures up to 1.1k should be shown with the next unit down; {
// this softens edge effects. // 1.1 means that figures up to 1.1k should be shown with the next unit down;
// 1 means that we should show one decimal place of precision. // this softens edge effects.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); // 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
} }
std::string StrFormatImp(const char* msg, va_list args) { std::string StrFormatImp(const char *msg, va_list args)
// we might need a second shot at this, so pre-emptivly make a copy {
va_list args_cp; // we might need a second shot at this, so pre-emptivly make a copy
va_copy(args_cp, args); va_list args_cp;
va_copy(args_cp, args);
// TODO(ericwf): use std::array for first attempt to avoid one memory // TODO(ericwf): use std::array for first attempt to avoid one memory
// allocation guess what the size might be // allocation guess what the size might be
std::array<char, 256> local_buff; std::array<char, 256> local_buff;
std::size_t size = local_buff.size(); std::size_t size = local_buff.size();
// 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation
// in the android-ndk // in the android-ndk
auto ret = vsnprintf(local_buff.data(), size, msg, args_cp); auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
va_end(args_cp); va_end(args_cp);
// handle empty expansion // handle empty expansion
if (ret == 0) return std::string{}; if (ret == 0)
if (static_cast<std::size_t>(ret) < size) return std::string{};
return std::string(local_buff.data()); if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
// we did not provide a long enough buffer on our first attempt. // we did not provide a long enough buffer on our first attempt.
// add 1 to size to account for null-byte in size cast to prevent overflow // add 1 to size to account for null-byte in size cast to prevent overflow
size = static_cast<std::size_t>(ret) + 1; size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]); auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
// 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation
// in the android-ndk // in the android-ndk
ret = vsnprintf(buff_ptr.get(), size, msg, args); ret = vsnprintf(buff_ptr.get(), size, msg, args);
return std::string(buff_ptr.get()); return std::string(buff_ptr.get());
} }
std::string StrFormat(const char* format, ...) { std::string StrFormat(const char *format, ...)
va_list args; {
va_start(args, format); va_list args;
std::string tmp = StrFormatImp(format, args); va_start(args, format);
va_end(args); std::string tmp = StrFormatImp(format, args);
return tmp; va_end(args);
return tmp;
} }
#ifdef BENCHMARK_STL_ANDROID_GNUSTL #ifdef BENCHMARK_STL_ANDROID_GNUSTL
@ -170,86 +187,95 @@ std::string StrFormat(const char* format, ...) {
* strtol, strtod. Note that reimplemented functions are in benchmark:: * strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace. * namespace, not std:: namespace.
*/ */
unsigned long stoul(const std::string& str, size_t* pos, int base) { unsigned long stoul(const std::string &str, size_t *pos, int base)
/* Record previous errno */ {
const int oldErrno = errno; /* Record previous errno */
errno = 0; const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str(); const char *strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart); char *strEnd = const_cast<char *>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base); const unsigned long result = strtoul(strStart, &strEnd, base);
const int strtoulErrno = errno; const int strtoulErrno = errno;
/* Restore previous errno */ /* Restore previous errno */
errno = oldErrno; errno = oldErrno;
/* Check for errors and return */ /* Check for errors and return */
if (strtoulErrno == ERANGE) { if (strtoulErrno == ERANGE)
throw std::out_of_range( {
"stoul failed: " + str + " is outside of range of unsigned long"); throw std::out_of_range("stoul failed: " + str + " is outside of range of unsigned long");
} else if (strEnd == strStart || strtoulErrno != 0) { }
throw std::invalid_argument( else if (strEnd == strStart || strtoulErrno != 0)
"stoul failed: " + str + " is not an integer"); {
} throw std::invalid_argument("stoul failed: " + str + " is not an integer");
if (pos != nullptr) { }
*pos = static_cast<size_t>(strEnd - strStart); if (pos != nullptr)
} {
return result; *pos = static_cast<size_t>(strEnd - strStart);
}
return result;
} }
int stoi(const std::string& str, size_t* pos, int base) { int stoi(const std::string &str, size_t *pos, int base)
/* Record previous errno */ {
const int oldErrno = errno; /* Record previous errno */
errno = 0; const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str(); const char *strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart); char *strEnd = const_cast<char *>(strStart);
const long result = strtol(strStart, &strEnd, base); const long result = strtol(strStart, &strEnd, base);
const int strtolErrno = errno; const int strtolErrno = errno;
/* Restore previous errno */ /* Restore previous errno */
errno = oldErrno; errno = oldErrno;
/* Check for errors and return */ /* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result) { if (strtolErrno == ERANGE || long(int(result)) != result)
throw std::out_of_range( {
"stoul failed: " + str + " is outside of range of int"); throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtolErrno != 0) { }
throw std::invalid_argument( else if (strEnd == strStart || strtolErrno != 0)
"stoul failed: " + str + " is not an integer"); {
} throw std::invalid_argument("stoul failed: " + str + " is not an integer");
if (pos != nullptr) { }
*pos = static_cast<size_t>(strEnd - strStart); if (pos != nullptr)
} {
return int(result); *pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
} }
double stod(const std::string& str, size_t* pos) { double stod(const std::string &str, size_t *pos)
/* Record previous errno */ {
const int oldErrno = errno; /* Record previous errno */
errno = 0; const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str(); const char *strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart); char *strEnd = const_cast<char *>(strStart);
const double result = strtod(strStart, &strEnd); const double result = strtod(strStart, &strEnd);
/* Restore previous errno */ /* Restore previous errno */
const int strtodErrno = errno; const int strtodErrno = errno;
errno = oldErrno; errno = oldErrno;
/* Check for errors and return */ /* Check for errors and return */
if (strtodErrno == ERANGE) { if (strtodErrno == ERANGE)
throw std::out_of_range( {
"stoul failed: " + str + " is outside of range of int"); throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtodErrno != 0) { }
throw std::invalid_argument( else if (strEnd == strStart || strtodErrno != 0)
"stoul failed: " + str + " is not an integer"); {
} throw std::invalid_argument("stoul failed: " + str + " is not an integer");
if (pos != nullptr) { }
*pos = static_cast<size_t>(strEnd - strStart); if (pos != nullptr)
} {
return result; *pos = static_cast<size_t>(strEnd - strStart);
}
return result;
} }
#endif #endif
} // end namespace benchmark } // end namespace benchmark
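A hedged sketch of the human-readable formatter above (the function and default one_k are from the header shown below; the wrapper and its name are illustrative, and the exact rendering depends on the threshold logic in ToBinaryStringFullySpecified):

// Sketch: format a throughput figure with a binary-scaled suffix.
#include <iostream>

void PrintThroughput(double bytes_per_second)
{
    // one_k = 1024.0 treats the figure as binary-scaled (Ki, Mi, ... steps).
    std::cout << benchmark::HumanReadableNumber(bytes_per_second, 1024.0) << "B/s\n";
}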
View File
@ -1,14 +1,15 @@
#ifndef BENCHMARK_STRING_UTIL_H_ #ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_ #define BENCHMARK_STRING_UTIL_H_
#include "internal_macros.h"
#include <sstream> #include <sstream>
#include <string> #include <string>
#include <utility> #include <utility>
#include "internal_macros.h"
namespace benchmark { namespace benchmark
{
void AppendHumanReadable(int n, std::string* str); void AppendHumanReadable(int n, std::string *str);
std::string HumanReadableNumber(double n, double one_k = 1024.0); std::string HumanReadableNumber(double n, double one_k = 1024.0);
@ -18,23 +19,24 @@ __attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
__attribute__((format(printf, 1, 2)))
#endif
std::string
StrFormat(const char *format, ...);

inline std::ostream &StrCatImp(std::ostream &out) BENCHMARK_NOEXCEPT
{
    return out;
}

template <class First, class... Rest> inline std::ostream &StrCatImp(std::ostream &out, First &&f, Rest &&...rest)
{
    out << std::forward<First>(f);
    return StrCatImp(out, std::forward<Rest>(rest)...);
}

template <class... Args> inline std::string StrCat(Args &&...args)
{
    std::ostringstream ss;
    StrCatImp(ss, std::forward<Args>(args)...);
    return ss.str();
}

#ifdef BENCHMARK_STL_ANDROID_GNUSTL
@ -44,16 +46,15 @@ inline std::string StrCat(Args&&... args) {
 * strtol, strtod. Note that reimplemented functions are in benchmark::
 * namespace, not std:: namespace.
 */
unsigned long stoul(const std::string &str, size_t *pos = nullptr, int base = 10);
int stoi(const std::string &str, size_t *pos = nullptr, int base = 10);
double stod(const std::string &str, size_t *pos = nullptr);
#else
using std::stod;
using std::stoi;
using std::stoul;
#endif

} // end namespace benchmark

#endif // BENCHMARK_STRING_UTIL_H_
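
Note: the StrCatImp recursion above bottoms out at the zero-argument overload, so StrCat accepts any mix of ostream-streamable values. A small illustrative call (values made up):

std::string label = benchmark::StrCat("items/s=", 1.5e6, " threads=", 8);
// label == "items/s=1.5e+06 threads=8" with default ostream formatting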

View File

@ -6,59 +6,68 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "mutex.h" #include "mutex.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
class ThreadManager { class ThreadManager
public: {
explicit ThreadManager(int num_threads) public:
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {} explicit ThreadManager(int num_threads) : alive_threads_(num_threads), start_stop_barrier_(num_threads)
{
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
} }
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { Mutex &GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_)
MutexLock lock(end_cond_mutex_); {
end_condition_.wait(lock.native_handle(), return benchmark_mutex_;
[this]() { return alive_threads_ == 0; }); }
}
public: bool StartStopBarrier() EXCLUDES(end_cond_mutex_)
struct Result { {
IterationCount iterations = 0; return start_stop_barrier_.wait();
double real_time_used = 0; }
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private: void NotifyThreadComplete() EXCLUDES(end_cond_mutex_)
mutable Mutex benchmark_mutex_; {
std::atomic<int> alive_threads_; start_stop_barrier_.removeThread();
Barrier start_stop_barrier_; if (--alive_threads_ == 0)
Mutex end_cond_mutex_; {
Condition end_condition_; MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_)
{
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(), [this]() { return alive_threads_ == 0; });
}
public:
struct Result
{
IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
}; };
} // namespace internal } // namespace internal
} // namespace benchmark } // namespace benchmark
#endif // BENCHMARK_THREAD_MANAGER_H #endif // BENCHMARK_THREAD_MANAGER_H
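
Note: NotifyThreadComplete()/WaitForAllThreads() implement a "last thread out notifies the waiter" handshake. A standalone sketch of the same pattern using std:: primitives instead of the library's Mutex/Condition wrappers (the Waiter class and its names are illustrative):

#include <atomic>
#include <condition_variable>
#include <mutex>

class Waiter
{
  public:
    explicit Waiter(int n) : alive_(n)
    {
    }
    // Each worker calls this exactly once when it finishes.
    void NotifyComplete()
    {
        if (--alive_ == 0)
        {
            // Notify while holding the lock, as ThreadManager does, so the
            // waiter cannot miss the wakeup between its predicate check and wait.
            std::lock_guard<std::mutex> lock(mu_);
            cv_.notify_all();
        }
    }
    // The coordinator blocks here until every worker has checked in.
    void WaitForAll()
    {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return alive_ == 0; });
    }

  private:
    std::mutex mu_;
    std::condition_variable cv_;
    std::atomic<int> alive_;
};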

View File

@ -4,83 +4,101 @@
#include "check.h" #include "check.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
class ThreadTimer { class ThreadTimer
explicit ThreadTimer(bool measure_process_cpu_time_) {
: measure_process_cpu_time(measure_process_cpu_time_) {} explicit ThreadTimer(bool measure_process_cpu_time_) : measure_process_cpu_time(measure_process_cpu_time_)
{
}
public: public:
static ThreadTimer Create() { static ThreadTimer Create()
return ThreadTimer(/*measure_process_cpu_time_=*/false); {
} return ThreadTimer(/*measure_process_cpu_time_=*/false);
static ThreadTimer CreateProcessCpuTime() { }
return ThreadTimer(/*measure_process_cpu_time_=*/true); static ThreadTimer CreateProcessCpuTime()
} {
return ThreadTimer(/*measure_process_cpu_time_=*/true);
}
// Called by each thread // Called by each thread
void StartTimer() { void StartTimer()
running_ = true; {
start_real_time_ = ChronoClockNow(); running_ = true;
start_cpu_time_ = ReadCpuTimerOfChoice(); start_real_time_ = ChronoClockNow();
} start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread // Called by each thread
void StopTimer() { void StopTimer()
CHECK(running_); {
running_ = false; CHECK(running_);
real_time_used_ += ChronoClockNow() - start_real_time_; running_ = false;
// Floating point error can result in the subtraction producing a negative real_time_used_ += ChronoClockNow() - start_real_time_;
// time. Guard against that. // Floating point error can result in the subtraction producing a negative
cpu_time_used_ += // time. Guard against that.
std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0); cpu_time_used_ += std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
} }
// Called by each thread // Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; } void SetIterationTime(double seconds)
{
manual_time_used_ += seconds;
}
bool running() const { return running_; } bool running() const
{
return running_;
}
// REQUIRES: timer is not running // REQUIRES: timer is not running
double real_time_used() const { double real_time_used() const
CHECK(!running_); {
return real_time_used_; CHECK(!running_);
} return real_time_used_;
}
// REQUIRES: timer is not running // REQUIRES: timer is not running
double cpu_time_used() const { double cpu_time_used() const
CHECK(!running_); {
return cpu_time_used_; CHECK(!running_);
} return cpu_time_used_;
}
// REQUIRES: timer is not running // REQUIRES: timer is not running
double manual_time_used() const { double manual_time_used() const
CHECK(!running_); {
return manual_time_used_; CHECK(!running_);
} return manual_time_used_;
}
private: private:
double ReadCpuTimerOfChoice() const { double ReadCpuTimerOfChoice() const
if (measure_process_cpu_time) return ProcessCPUUsage(); {
return ThreadCPUUsage(); if (measure_process_cpu_time)
} return ProcessCPUUsage();
return ThreadCPUUsage();
}
// should the thread, or the process, time be measured? // should the thread, or the process, time be measured?
const bool measure_process_cpu_time; const bool measure_process_cpu_time;
bool running_ = false; // Is the timer running bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_ double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_ double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_) // Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0; double real_time_used_ = 0;
double cpu_time_used_ = 0; double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds). // Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0; double manual_time_used_ = 0;
}; };
} // namespace internal } // namespace internal
} // namespace benchmark } // namespace benchmark
#endif // BENCHMARK_THREAD_TIMER_H #endif // BENCHMARK_THREAD_TIMER_H
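
Note: the += accumulation is what lets State::PauseTiming()/ResumeTiming() work: each resume/pause pair closes one timed slice, and an iteration's reported time is the sum of its slices. A rough sketch of the bookkeeping (plain doubles, not the actual class):

double real_time_used = 0;
double start = ChronoClockNow();            // StartTimer()
// ... timed work ...
real_time_used += ChronoClockNow() - start; // StopTimer(): slice 1 banked
start = ChronoClockNow();                   // StartTimer() again after ResumeTiming()
// ... more timed work ...
real_time_used += ChronoClockNow() - start; // StopTimer(): slice 2 banked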

View File

@ -17,7 +17,7 @@
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#else
@ -26,7 +26,7 @@
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
@ -57,161 +57,172 @@
#include "sleep.h" #include "sleep.h"
#include "string_util.h" #include "string_util.h"
namespace benchmark { namespace benchmark
{
// Suppress unused warnings on helper functions. // Suppress unused warnings on helper functions.
#if defined(__GNUC__) #if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function" #pragma GCC diagnostic ignored "-Wunused-function"
#endif #endif
namespace { namespace
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { double MakeTime(FILETIME const &kernel_time, FILETIME const &user_time)
ULARGE_INTEGER kernel; {
ULARGE_INTEGER user; ULARGE_INTEGER kernel;
kernel.HighPart = kernel_time.dwHighDateTime; ULARGE_INTEGER user;
kernel.LowPart = kernel_time.dwLowDateTime; kernel.HighPart = kernel_time.dwHighDateTime;
user.HighPart = user_time.dwHighDateTime; kernel.LowPart = kernel_time.dwLowDateTime;
user.LowPart = user_time.dwLowDateTime; user.HighPart = user_time.dwHighDateTime;
return (static_cast<double>(kernel.QuadPart) + user.LowPart = user_time.dwLowDateTime;
static_cast<double>(user.QuadPart)) * return (static_cast<double>(kernel.QuadPart) + static_cast<double>(user.QuadPart)) * 1e-7;
1e-7;
} }
#elif !defined(BENCHMARK_OS_FUCHSIA) #elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const& ru) { double MakeTime(struct rusage const &ru)
return (static_cast<double>(ru.ru_utime.tv_sec) + {
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 + return (static_cast<double>(ru.ru_utime.tv_sec) + static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
} }
#endif #endif
#if defined(BENCHMARK_OS_MACOSX) #if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const& info) { double MakeTime(thread_basic_info_data_t const &info)
return (static_cast<double>(info.user_time.seconds) + {
static_cast<double>(info.user_time.microseconds) * 1e-6 + return (static_cast<double>(info.user_time.seconds) + static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.microseconds) * 1e-6);
static_cast<double>(info.system_time.microseconds) * 1e-6);
} }
#endif #endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) #if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) { double MakeTime(struct timespec const &ts)
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9); {
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
} }
#endif #endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) { BENCHMARK_NORETURN static void DiagnoseAndExit(const char *msg)
std::cerr << "ERROR: " << msg << std::endl; {
std::exit(EXIT_FAILURE); std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
} }
} // end namespace } // end namespace
double ProcessCPUUsage() { double ProcessCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess(); HANDLE proc = GetCurrentProcess();
FILETIME creation_time; FILETIME creation_time;
FILETIME exit_time; FILETIME exit_time;
FILETIME kernel_time; FILETIME kernel_time;
FILETIME user_time; FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, &user_time))
&user_time)) return MakeTime(kernel_time, user_time);
return MakeTime(kernel_time, user_time); DiagnoseAndExit("GetProccessTimes() failed");
DiagnoseAndExit("GetProccessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN) #elif defined(BENCHMARK_OS_EMSCRIPTEN)
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the // Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency // same as total time, but this is ok because there aren't long-latency
// syncronous system calls in Emscripten. // syncronous system calls in Emscripten.
return emscripten_get_now() * 1e-3; return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) #elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292 // https://github.com/google/benchmark/pull/292
struct timespec spec; struct timespec spec;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
return MakeTime(spec); return MakeTime(spec);
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else #else
struct rusage ru; struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); if (getrusage(RUSAGE_SELF, &ru) == 0)
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed"); return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif #endif
} }
double ThreadCPUUsage() { double ThreadCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread(); HANDLE this_thread = GetCurrentThread();
FILETIME creation_time; FILETIME creation_time;
FILETIME exit_time; FILETIME exit_time;
FILETIME kernel_time; FILETIME kernel_time;
FILETIME user_time; FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, &user_time);
&user_time); return MakeTime(kernel_time, user_time);
return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX) #elif defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292 // https://github.com/google/benchmark/pull/292
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info; thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self()); mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == KERN_SUCCESS)
KERN_SUCCESS) { {
return MakeTime(info); return MakeTime(info);
} }
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
#elif defined(BENCHMARK_OS_EMSCRIPTEN) #elif defined(BENCHMARK_OS_EMSCRIPTEN)
// Emscripten doesn't support traditional threads // Emscripten doesn't support traditional threads
return ProcessCPUUsage(); return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_RTEMS) #elif defined(BENCHMARK_OS_RTEMS)
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage(); return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS) #elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru; struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); if (getrusage(RUSAGE_LWP, &ru) == 0)
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed"); return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID) #elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts; struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else #else
#error Per-thread timing is not available on your system. #error Per-thread timing is not available on your system.
#endif #endif
} }
namespace { namespace
{
std::string DateTimeString(bool local) { std::string DateTimeString(bool local)
typedef std::chrono::system_clock Clock; {
std::time_t now = Clock::to_time_t(Clock::now()); typedef std::chrono::system_clock Clock;
const std::size_t kStorageSize = 128; std::time_t now = Clock::to_time_t(Clock::now());
char storage[kStorageSize]; const std::size_t kStorageSize = 128;
std::size_t written; char storage[kStorageSize];
std::size_t written;
if (local) { if (local)
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
written = written = std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else #else
std::tm timeinfo; std::tm timeinfo;
::localtime_r(&now, &timeinfo); ::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif #endif
} else { }
else
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now)); written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else #else
std::tm timeinfo; std::tm timeinfo;
::gmtime_r(&now, &timeinfo); ::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif #endif
} }
CHECK(written < kStorageSize); CHECK(written < kStorageSize);
((void)written); // prevent unused variable in optimized mode. ((void)written); // prevent unused variable in optimized mode.
return std::string(storage); return std::string(storage);
} }
} // end namespace } // end namespace
std::string LocalDateTimeString() { return DateTimeString(true); } std::string LocalDateTimeString()
{
return DateTimeString(true);
}
} // end namespace benchmark } // end namespace benchmark
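
Note: each scale factor above encodes its source's native unit: FILETIME counts 100 ns ticks (hence 1e-7), rusage carries seconds plus microseconds (1e-6), and timespec carries seconds plus nanoseconds (1e-9). A quick illustration of the CPU-versus-wall distinction these helpers expose (a fragment; assumes the benchmark headers are on the include path, plus <chrono> and <thread>):

double cpu0 = benchmark::ThreadCPUUsage();
double wall0 = benchmark::ChronoClockNow();
std::this_thread::sleep_for(std::chrono::milliseconds(100));
// A sleeping thread burns almost no CPU time but a full 0.1 s of wall time:
// ThreadCPUUsage() - cpu0 is ~0, while ChronoClockNow() - wall0 is ~0.1.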

View File

@ -4,7 +4,8 @@
#include <chrono>
#include <string>

namespace benchmark
{

// Return the CPU usage of the current process
double ProcessCPUUsage();
@ -16,33 +17,35 @@ double ChildrenCPUUsage();
double ThreadCPUUsage();

#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady> struct ChooseSteadyClock
{
    typedef std::chrono::high_resolution_clock type;
};

template <> struct ChooseSteadyClock<false>
{
    typedef std::chrono::steady_clock type;
};
#endif

struct ChooseClockType
{
#if defined(HAVE_STEADY_CLOCK)
    typedef ChooseSteadyClock<>::type type;
#else
    typedef std::chrono::high_resolution_clock type;
#endif
};

inline double ChronoClockNow()
{
    typedef ChooseClockType::type ClockType;
    using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
    return FpSeconds(ClockType::now().time_since_epoch()).count();
}

std::string LocalDateTimeString();

} // end namespace benchmark

#endif // BENCHMARK_TIMERS_H
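
Note: ChooseSteadyClock keeps high_resolution_clock only when it is already steady and otherwise falls back to steady_clock, so ChronoClockNow() never runs backwards when the wall clock is adjusted. The default-argument-plus-specialization trick generalizes to any compile-time either/or choice; a standalone sketch:

template <bool Use64 = (sizeof(long) == 8)> struct ChooseCounter
{
    typedef long type; // long is already 64-bit here
};
template <> struct ChooseCounter<false>
{
    typedef long long type; // fall back to a guaranteed 64-bit type
};
static_assert(sizeof(ChooseCounter<>::type) >= 8, "counter is at least 64 bits");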

View File

@ -3,134 +3,165 @@
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)

void BM_empty(benchmark::State &state)
{
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(state.iterations());
    }
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();

void BM_spin_empty(benchmark::State &state)
{
    for (auto _ : state)
    {
        for (int x = 0; x < state.range(0); ++x)
        {
            benchmark::DoNotOptimize(x);
        }
    }
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

void BM_spin_pause_before(benchmark::State &state)
{
    for (int i = 0; i < state.range(0); ++i)
    {
        benchmark::DoNotOptimize(i);
    }
    for (auto _ : state)
    {
        for (int i = 0; i < state.range(0); ++i)
        {
            benchmark::DoNotOptimize(i);
        }
    }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();

void BM_spin_pause_during(benchmark::State &state)
{
    for (auto _ : state)
    {
        state.PauseTiming();
        for (int i = 0; i < state.range(0); ++i)
        {
            benchmark::DoNotOptimize(i);
        }
        state.ResumeTiming();
        for (int i = 0; i < state.range(0); ++i)
        {
            benchmark::DoNotOptimize(i);
        }
    }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();

void BM_pause_during(benchmark::State &state)
{
    for (auto _ : state)
    {
        state.PauseTiming();
        state.ResumeTiming();
    }
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();

void BM_spin_pause_after(benchmark::State &state)
{
    for (auto _ : state)
    {
        for (int i = 0; i < state.range(0); ++i)
        {
            benchmark::DoNotOptimize(i);
        }
    }
    for (int i = 0; i < state.range(0); ++i)
    {
        benchmark::DoNotOptimize(i);
    }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();

void BM_spin_pause_before_and_after(benchmark::State &state)
{
    for (int i = 0; i < state.range(0); ++i)
    {
        benchmark::DoNotOptimize(i);
    }
    for (auto _ : state)
    {
        for (int i = 0; i < state.range(0); ++i)
        {
            benchmark::DoNotOptimize(i);
        }
    }
    for (int i = 0; i < state.range(0); ++i)
    {
        benchmark::DoNotOptimize(i);
    }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();

void BM_empty_stop_start(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

void BM_KeepRunning(benchmark::State &state)
{
    benchmark::IterationCount iter_count = 0;
    assert(iter_count == state.iterations());
    while (state.KeepRunning())
    {
        ++iter_count;
    }
    assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);

void BM_KeepRunningBatch(benchmark::State &state)
{
    // Choose a prime batch size to avoid evenly dividing max_iterations.
    const benchmark::IterationCount batch_size = 101;
    benchmark::IterationCount iter_count = 0;
    while (state.KeepRunningBatch(batch_size))
    {
        iter_count += batch_size;
    }
    assert(state.iterations() == iter_count);
}
BENCHMARK(BM_KeepRunningBatch);

void BM_RangedFor(benchmark::State &state)
{
    benchmark::IterationCount iter_count = 0;
    for (auto _ : state)
    {
        ++iter_count;
    }
    assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);

// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
                           typename benchmark::State::StateIterator::value_type>::value,
              "");

BENCHMARK_MAIN();
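
Note on BM_KeepRunningBatch above: a prime batch size cannot evenly divide whatever iteration budget the runner settles on, which forces the final KeepRunningBatch call to overshoot. With an illustrative budget of 1,000 iterations, the loop runs ceil(1000 / 101) = 10 batches, i.e. 1,010 iterations, and the assert verifies that state.iterations() reports all 1,010 actually executed rather than the 1,000 requested.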

View File

@ -4,125 +4,141 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace { namespace internal
{
namespace
{
TEST(AddRangeTest, Simple) { TEST(AddRangeTest, Simple)
std::vector<int> dst; {
AddRange(&dst, 1, 2, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(1, 2)); AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
} }
TEST(AddRangeTest, Simple64) { TEST(AddRangeTest, Simple64)
std::vector<int64_t> dst; {
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2); std::vector<int64_t> dst;
EXPECT_THAT(dst, testing::ElementsAre(1, 2)); AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
} }
TEST(AddRangeTest, Advanced) { TEST(AddRangeTest, Advanced)
std::vector<int> dst; {
AddRange(&dst, 5, 15, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
} }
TEST(AddRangeTest, Advanced64) { TEST(AddRangeTest, Advanced64)
std::vector<int64_t> dst; {
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2); std::vector<int64_t> dst;
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
} }
TEST(AddRangeTest, FullRange8) { TEST(AddRangeTest, FullRange8)
std::vector<int8_t> dst; {
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8); std::vector<int8_t> dst;
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127)); AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
} }
TEST(AddRangeTest, FullRange64) { TEST(AddRangeTest, FullRange64)
std::vector<int64_t> dst; {
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024); std::vector<int64_t> dst;
EXPECT_THAT( AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, EXPECT_THAT(dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1099511627776LL, 1125899906842624LL, 1152921504606846976LL, 9223372036854775807LL));
1152921504606846976LL, 9223372036854775807LL));
} }
TEST(AddRangeTest, NegativeRanges) { TEST(AddRangeTest, NegativeRanges)
std::vector<int> dst; {
AddRange(&dst, -8, 0, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0)); AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
} }
TEST(AddRangeTest, StrictlyNegative) { TEST(AddRangeTest, StrictlyNegative)
std::vector<int> dst; {
AddRange(&dst, -8, -1, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1)); AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
} }
TEST(AddRangeTest, SymmetricNegativeRanges) { TEST(AddRangeTest, SymmetricNegativeRanges)
std::vector<int> dst; {
AddRange(&dst, -8, 8, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8)); AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
} }
TEST(AddRangeTest, SymmetricNegativeRangesOddMult) { TEST(AddRangeTest, SymmetricNegativeRangesOddMult)
std::vector<int> dst; {
AddRange(&dst, -30, 32, 5); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32)); AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
} }
TEST(AddRangeTest, NegativeRangesAsymmetric) { TEST(AddRangeTest, NegativeRangesAsymmetric)
std::vector<int> dst; {
AddRange(&dst, -3, 5, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5)); AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
} }
TEST(AddRangeTest, NegativeRangesLargeStep) { TEST(AddRangeTest, NegativeRangesLargeStep)
// Always include -1, 0, 1 when crossing zero. {
std::vector<int> dst; // Always include -1, 0, 1 when crossing zero.
AddRange(&dst, -8, 8, 10); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8)); AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
} }
TEST(AddRangeTest, ZeroOnlyRange) { TEST(AddRangeTest, ZeroOnlyRange)
std::vector<int> dst; {
AddRange(&dst, 0, 0, 2); std::vector<int> dst;
EXPECT_THAT(dst, testing::ElementsAre(0)); AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0));
} }
TEST(AddRangeTest, NegativeRange64) { TEST(AddRangeTest, NegativeRange64)
std::vector<int64_t> dst; {
AddRange<int64_t>(&dst, -4, 4, 2); std::vector<int64_t> dst;
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4)); AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
} }
TEST(AddRangeTest, NegativeRangePreservesExistingOrder) { TEST(AddRangeTest, NegativeRangePreservesExistingOrder)
// If elements already exist in the range, ensure we don't change {
// their ordering by adding negative values. // If elements already exist in the range, ensure we don't change
std::vector<int64_t> dst = {1, 2, 3}; // their ordering by adding negative values.
AddRange<int64_t>(&dst, -2, 2, 2); std::vector<int64_t> dst = {1, 2, 3};
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2)); AddRange<int64_t>(&dst, -2, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
} }
TEST(AddRangeTest, FullNegativeRange64) { TEST(AddRangeTest, FullNegativeRange64)
std::vector<int64_t> dst; {
const auto min = std::numeric_limits<int64_t>::min(); std::vector<int64_t> dst;
const auto max = std::numeric_limits<int64_t>::max(); const auto min = std::numeric_limits<int64_t>::min();
AddRange(&dst, min, max, 1024); const auto max = std::numeric_limits<int64_t>::max();
EXPECT_THAT( AddRange(&dst, min, max, 1024);
dst, testing::ElementsAreArray(std::vector<int64_t>{ EXPECT_THAT(dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL, min, -1152921504606846976LL, -1125899906842624LL, -1099511627776LL, -1073741824LL, -1048576LL,
-1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL, -1024LL, -1LL, 0LL, 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1152921504606846976LL, max}));
1125899906842624LL, 1152921504606846976LL, max}));
} }
TEST(AddRangeTest, Simple8) { TEST(AddRangeTest, Simple8)
std::vector<int8_t> dst; {
AddRange<int8_t>(&dst, 1, 8, 2); std::vector<int8_t> dst;
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8)); AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
} }
} // namespace } // namespace
} // namespace internal } // namespace internal
} // namespace benchmark } // namespace benchmark
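
Taken together, these cases pin down AddRange's contract: it emits lo, every exact power of the multiplier strictly between lo and hi, and hi itself; ranges dipping below zero get the mirrored negative powers, and any range crossing zero always includes -1, 0, 1 no matter how large the step (NegativeRangesLargeStep). Worked example from Advanced: AddRange(&dst, 5, 15, 2) yields {5, 8, 15}, because 8 = 2^3 is the only power of two strictly inside the interval and both endpoints are kept verbatim.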

View File

@ -1,74 +1,84 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace { namespace
{
using namespace benchmark; using namespace benchmark;
using namespace benchmark::internal; using namespace benchmark::internal;
TEST(BenchmarkNameTest, Empty) { TEST(BenchmarkNameTest, Empty)
const auto name = BenchmarkName(); {
EXPECT_EQ(name.str(), std::string()); const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string());
} }
TEST(BenchmarkNameTest, FunctionName) { TEST(BenchmarkNameTest, FunctionName)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
EXPECT_EQ(name.str(), "function_name"); name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name");
} }
TEST(BenchmarkNameTest, FunctionNameAndArgs) { TEST(BenchmarkNameTest, FunctionNameAndArgs)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
name.args = "some_args:3/4/5"; name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5"); name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
} }
TEST(BenchmarkNameTest, MinTime) { TEST(BenchmarkNameTest, MinTime)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
name.args = "some_args:3/4"; name.function_name = "function_name";
name.min_time = "min_time:3.4s"; name.args = "some_args:3/4";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s"); name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
} }
TEST(BenchmarkNameTest, Iterations) { TEST(BenchmarkNameTest, Iterations)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
name.min_time = "min_time:3.4s"; name.function_name = "function_name";
name.iterations = "iterations:42"; name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42"); name.iterations = "iterations:42";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
} }
TEST(BenchmarkNameTest, Repetitions) { TEST(BenchmarkNameTest, Repetitions)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
name.min_time = "min_time:3.4s"; name.function_name = "function_name";
name.repetitions = "repetitions:24"; name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24"); name.repetitions = "repetitions:24";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
} }
TEST(BenchmarkNameTest, TimeType) { TEST(BenchmarkNameTest, TimeType)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
name.min_time = "min_time:3.4s"; name.function_name = "function_name";
name.time_type = "hammer_time"; name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time"); name.time_type = "hammer_time";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
} }
TEST(BenchmarkNameTest, Threads) { TEST(BenchmarkNameTest, Threads)
auto name = BenchmarkName(); {
name.function_name = "function_name"; auto name = BenchmarkName();
name.min_time = "min_time:3.4s"; name.function_name = "function_name";
name.threads = "threads:256"; name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256"); name.threads = "threads:256";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
} }
TEST(BenchmarkNameTest, TestEmptyFunctionName) { TEST(BenchmarkNameTest, TestEmptyFunctionName)
auto name = BenchmarkName(); {
name.args = "first:3/second:4"; auto name = BenchmarkName();
name.threads = "threads:22"; name.args = "first:3/second:4";
EXPECT_EQ(name.str(), "first:3/second:4/threads:22"); name.threads = "threads:22";
EXPECT_EQ(name.str(), "first:3/second:4/threads:22");
} }
} // end namespace } // end namespace
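
The invariant these tests exercise: BenchmarkName::str() joins whichever fields are non-empty with '/', in the fixed order function_name, args, min_time, iterations, repetitions, time_type, threads, silently skipping empty fields. That is why the final case, with no function_name set at all, still yields the well-formed "first:3/second:4/threads:22" with no leading slash.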

View File

@ -24,219 +24,252 @@
#define BENCHMARK_NOINLINE
#endif

namespace
{

int BENCHMARK_NOINLINE Factorial(uint32_t n)
{
    return (n == 1) ? 1 : n * Factorial(n - 1);
}

double CalculatePi(int depth)
{
    double pi = 0.0;
    for (int i = 0; i < depth; ++i)
    {
        double numerator = static_cast<double>(((i % 2) * 2) - 1);
        double denominator = static_cast<double>((2 * i) - 1);
        pi += numerator / denominator;
    }
    return (pi - 1.0) * 4;
}

std::set<int64_t> ConstructRandomSet(int64_t size)
{
    std::set<int64_t> s;
    for (int i = 0; i < size; ++i)
        s.insert(s.end(), i);
    return s;
}

std::mutex test_vector_mu;
std::vector<int> *test_vector = nullptr;

} // end namespace

static void BM_Factorial(benchmark::State &state)
{
    int fac_42 = 0;
    for (auto _ : state)
        fac_42 = Factorial(8);
    // Prevent compiler optimizations
    std::stringstream ss;
    ss << fac_42;
    state.SetLabel(ss.str());
}
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();

static void BM_CalculatePiRange(benchmark::State &state)
{
    double pi = 0.0;
    for (auto _ : state)
        pi = CalculatePi(static_cast<int>(state.range(0)));
    std::stringstream ss;
    ss << pi;
    state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);

static void BM_CalculatePi(benchmark::State &state)
{
    static const int depth = 1024;
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
    }
}
BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();

static void BM_SetInsert(benchmark::State &state)
{
    std::set<int64_t> data;
    for (auto _ : state)
    {
        state.PauseTiming();
        data = ConstructRandomSet(state.range(0));
        state.ResumeTiming();
        for (int j = 0; j < state.range(1); ++j)
            data.insert(rand());
    }
    state.SetItemsProcessed(state.iterations() * state.range(1));
    state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}

// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
// non-timed part of each iteration will make the benchmark take forever.
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});

template <typename Container, typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State &state)
{
    ValueType v = 42;
    for (auto _ : state)
    {
        Container c;
        for (int64_t i = state.range(0); --i;)
            c.push_back(v);
    }
    const int64_t items_processed = state.iterations() * state.range(0);
    state.SetItemsProcessed(items_processed);
    state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif

static void BM_StringCompare(benchmark::State &state)
{
    size_t len = static_cast<size_t>(state.range(0));
    std::string s1(len, '-');
    std::string s2(len, '-');
    for (auto _ : state)
        benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);

static void BM_SetupTeardown(benchmark::State &state)
{
    if (state.thread_index == 0)
    {
        // No need to lock test_vector_mu here as this is running single-threaded.
        test_vector = new std::vector<int>();
    }
    int i = 0;
    for (auto _ : state)
    {
        std::lock_guard<std::mutex> l(test_vector_mu);
        if (i % 2 == 0)
            test_vector->push_back(i);
        else
            test_vector->pop_back();
        ++i;
    }
    if (state.thread_index == 0)
    {
        delete test_vector;
    }
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();

static void BM_LongTest(benchmark::State &state)
{
    double tracker = 0.0;
    for (auto _ : state)
    {
        for (int i = 0; i < state.range(0); ++i)
            benchmark::DoNotOptimize(tracker += i);
    }
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);

static void BM_ParallelMemset(benchmark::State &state)
{
    int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
    int thread_size = static_cast<int>(size) / state.threads;
    int from = thread_size * state.thread_index;
    int to = from + thread_size;

    if (state.thread_index == 0)
    {
        test_vector = new std::vector<int>(static_cast<size_t>(size));
    }

    for (auto _ : state)
    {
        for (int i = from; i < to; i++)
        {
            // No need to lock test_vector_mu as ranges
            // do not overlap between threads.
            benchmark::DoNotOptimize(test_vector->at(i) = 1);
        }
    }

    if (state.thread_index == 0)
    {
        delete test_vector;
    }
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);

static void BM_ManualTiming(benchmark::State &state)
{
    int64_t slept_for = 0;
    int64_t microseconds = state.range(0);
    std::chrono::duration<double, std::micro> sleep_duration{static_cast<double>(microseconds)};

    for (auto _ : state)
    {
        auto start = std::chrono::high_resolution_clock::now();
        // Simulate some useful workload with a sleep
        std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
        auto end = std::chrono::high_resolution_clock::now();

        auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);

        state.SetIterationTime(elapsed.count());
        slept_for += microseconds;
    }
    state.SetItemsProcessed(slept_for);
}
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();

#ifdef BENCHMARK_HAS_CXX11

template <class... Args> void BM_with_args(benchmark::State &state, Args &&...)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair<int, double>(42, 3.8));

void BM_non_template_args(benchmark::State &state, int, double)
{
    while (state.KeepRunning())
    {
    }
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);

#endif // BENCHMARK_HAS_CXX11

static void BM_DenseThreadRanges(benchmark::State &st)
{
    switch (st.range(0))
    {
    case 1:
        assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
        break;
    case 2:
        assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
        break;
    case 3:
        assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || st.threads == 14);
        break;
    default:
        assert(false && "Invalid test case number");
    }
    while (st.KeepRunning())
    {
    }
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
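
Note on BM_SetInsert's registration above: Ranges takes one {lo, hi} pair per argument dimension and benchmarks the cartesian product of the expanded ranges. With the library's default range multiplier of 8, {1 << 10, 8 << 10} should expand to {1024, 4096, 8192} and {128, 512} to {128, 512}, so that single Ranges call registers six runs, from BM_SetInsert/1024/128 through BM_SetInsert/8192/512.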

View File

@ -4,61 +4,65 @@
#pragma clang diagnostic ignored "-Wreturn-type"
#endif

extern "C"
{

extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;

}

// CHECK-LABEL: test_basic:
extern "C" void test_basic()
{
    int x;
    benchmark::DoNotOptimize(&x);
    x = 101;
    benchmark::ClobberMemory();
    // CHECK: leaq [[DEST:[^,]+]], %rax
    // CHECK: movl $101, [[DEST]]
    // CHECK: ret
}

// CHECK-LABEL: test_redundant_store:
extern "C" void test_redundant_store()
{
    ExternInt = 3;
    benchmark::ClobberMemory();
    ExternInt = 51;
    // CHECK-DAG: ExternInt
    // CHECK-DAG: movl $3
    // CHECK: movl $51
}

// CHECK-LABEL: test_redundant_read:
extern "C" void test_redundant_read()
{
    int x;
    benchmark::DoNotOptimize(&x);
    x = ExternInt;
    benchmark::ClobberMemory();
    x = ExternInt2;
    // CHECK: leaq [[DEST:[^,]+]], %rax
    // CHECK: ExternInt(%rip)
    // CHECK: movl %eax, [[DEST]]
    // CHECK-NOT: ExternInt2
    // CHECK: ret
}

// CHECK-LABEL: test_redundant_read2:
extern "C" void test_redundant_read2()
{
    int x;
    benchmark::DoNotOptimize(&x);
    x = ExternInt;
    benchmark::ClobberMemory();
    x = ExternInt2;
    benchmark::ClobberMemory();
    // CHECK: leaq [[DEST:[^,]+]], %rax
    // CHECK: ExternInt(%rip)
    // CHECK: movl %eax, [[DEST]]
    // CHECK: ExternInt2(%rip)
    // CHECK: movl %eax, [[DEST]]
    // CHECK: ret
}
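
These FileCheck patterns encode the optimization-barrier contract: a store sequenced before ClobberMemory() must actually reach memory (the movl $3 survives in test_redundant_store), while a store or load with no barrier after it may be dropped entirely (ExternInt2 vanishes from test_redundant_read). In benchmark code the pair is typically used as below (a sketch in the spirit of the library docs; the vector workload is illustrative):

#include <vector>
#include "benchmark/benchmark.h"

static void BM_VectorPushBack(benchmark::State &state)
{
    for (auto _ : state)
    {
        std::vector<int> v;
        v.reserve(1);
        benchmark::DoNotOptimize(v.data()); // make the heap buffer observable
        v.push_back(42);
        benchmark::ClobberMemory(); // force the store into v to be materialized
    }
}
BENCHMARK(BM_VectorPushBack);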

View File

@ -4,198 +4,215 @@
#include "../src/internal_macros.h" #include "../src/internal_macros.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace benchmark { namespace benchmark
namespace { {
namespace
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
int setenv(const char* name, const char* value, int overwrite) { int setenv(const char *name, const char *value, int overwrite)
if (!overwrite) { {
// NOTE: getenv_s is far superior but not available under mingw. if (!overwrite)
char* env_value = getenv(name); {
if (env_value == nullptr) { // NOTE: getenv_s is far superior but not available under mingw.
return -1; char *env_value = getenv(name);
if (env_value == nullptr)
{
return -1;
}
} }
} return _putenv_s(name, value);
return _putenv_s(name, value);
} }
int unsetenv(const char* name) { int unsetenv(const char *name)
return _putenv_s(name, ""); {
return _putenv_s(name, "");
} }
#endif // BENCHMARK_OS_WINDOWS #endif // BENCHMARK_OS_WINDOWS
TEST(BoolFromEnv, Default) { TEST(BoolFromEnv, Default)
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); {
EXPECT_EQ(BoolFromEnv("not_in_env", true), true); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
} }
TEST(BoolFromEnv, False) { TEST(BoolFromEnv, False)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0); {
EXPECT_EQ(BoolFromEnv("in_env", true), false); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "N", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "N", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "n", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "n", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "NO", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "NO", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "No", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "No", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "no", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "no", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "F", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "F", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "f", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "f", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "FALSE", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "FALSE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "False", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "False", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "false", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "false", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "OFF", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "OFF", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Off", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "off", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
} }
TEST(BoolFromEnv, True) { TEST(BoolFromEnv, True)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0); {
EXPECT_EQ(BoolFromEnv("in_env", false), true); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Y", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "y", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "YES", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "YES", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Yes", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "yes", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "T", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "T", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "t", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "t", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "TRUE", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "TRUE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "True", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "True", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "true", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "true", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "ON", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "ON", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "On", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "On", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "on", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "on", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
#ifndef BENCHMARK_OS_WINDOWS #ifndef BENCHMARK_OS_WINDOWS
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
#endif #endif
} }
TEST(Int32FromEnv, NotInEnv) { TEST(Int32FromEnv, NotInEnv)
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); {
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
} }
TEST(Int32FromEnv, InvalidInteger) { TEST(Int32FromEnv, InvalidInteger)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0); {
EXPECT_EQ(Int32FromEnv("in_env", 42), 42); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
unsetenv("BENCHMARK_IN_ENV");
} }
TEST(Int32FromEnv, ValidInteger) { TEST(Int32FromEnv, ValidInteger)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0); {
EXPECT_EQ(Int32FromEnv("in_env", 64), 42); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
unsetenv("BENCHMARK_IN_ENV");
} }
TEST(DoubleFromEnv, NotInEnv) { TEST(DoubleFromEnv, NotInEnv)
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); {
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
} }
TEST(DoubleFromEnv, InvalidReal) { TEST(DoubleFromEnv, InvalidReal)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0); {
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
unsetenv("BENCHMARK_IN_ENV");
} }
TEST(DoubleFromEnv, ValidReal) { TEST(DoubleFromEnv, ValidReal)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0); {
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
unsetenv("BENCHMARK_IN_ENV");
} }
TEST(StringFromEnv, Default) { TEST(StringFromEnv, Default)
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); {
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo"); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
} }
TEST(StringFromEnv, Valid) { TEST(StringFromEnv, Valid)
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0); {
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo"); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
unsetenv("BENCHMARK_IN_ENV"); EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
unsetenv("BENCHMARK_IN_ENV");
} }
} // namespace } // namespace
} // namespace benchmark } // namespace benchmark
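// NOTE (editorial): these tests set BENCHMARK_IN_ENV but query "in_env", so
// the *FromEnv helpers evidently uppercase the flag name and prepend
// "BENCHMARK_". A sketch of that mapping, assuming <cctype> and <string>;
// FlagToEnvVar is a hypothetical helper for illustration, not a library API:
std::string FlagToEnvVar(std::string flag)
{
    for (char &c : flag)
        c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
    return "BENCHMARK_" + flag; // "in_env" -> "BENCHMARK_IN_ENV"
}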
@ -1,74 +1,73 @@
#undef NDEBUG
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>

#include "benchmark/benchmark.h"
#include "output_test.h"

namespace
{

#define ADD_COMPLEXITY_CASES(...) int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)

int AddComplexityTest(std::string test_name, std::string big_o_test_name, std::string rms_test_name, std::string big_o)
{
    SetSubstitutions({{"%name", test_name},
                      {"%bigo_name", big_o_test_name},
                      {"%rms_name", rms_test_name},
                      {"%bigo_str", "[ ]* %float " + big_o},
                      {"%bigo", big_o},
                      {"%rms", "[ ]*[0-9]+ %"}});
    AddCases(TC_ConsoleOut, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
                             {"^%bigo_name", MR_Not}, // Assert we didn't only match a name.
                             {"^%rms_name %rms %rms[ ]*$", MR_Next}});
    AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
                          {"\"run_name\": \"%name\",$", MR_Next},
                          {"\"run_type\": \"aggregate\",$", MR_Next},
                          {"\"repetitions\": %int,$", MR_Next},
                          {"\"threads\": 1,$", MR_Next},
                          {"\"aggregate_name\": \"BigO\",$", MR_Next},
                          {"\"cpu_coefficient\": %float,$", MR_Next},
                          {"\"real_coefficient\": %float,$", MR_Next},
                          {"\"big_o\": \"%bigo\",$", MR_Next},
                          {"\"time_unit\": \"ns\"$", MR_Next},
                          {"}", MR_Next},
                          {"\"name\": \"%rms_name\",$"},
                          {"\"run_name\": \"%name\",$", MR_Next},
                          {"\"run_type\": \"aggregate\",$", MR_Next},
                          {"\"repetitions\": %int,$", MR_Next},
                          {"\"threads\": 1,$", MR_Next},
                          {"\"aggregate_name\": \"RMS\",$", MR_Next},
                          {"\"rms\": %float$", MR_Next},
                          {"}", MR_Next}});
    AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
                         {"^\"%bigo_name\"", MR_Not},
                         {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
    return 0;
}

} // end namespace

// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //

void BM_Complexity_O1(benchmark::State &state)
{
    for (auto _ : state)
    {
        for (int i = 0; i < 1024; ++i)
        {
            benchmark::DoNotOptimize(&i);
        }
    }
    state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](benchmark::IterationCount) { return 1.0; });

const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";

@ -81,53 +80,46 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);

// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);

// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);

// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //

std::vector<int> ConstructRandomVector(int64_t size)
{
    std::vector<int> v;
    v.reserve(static_cast<int>(size));
    for (int i = 0; i < size; ++i)
    {
        v.push_back(static_cast<int>(std::rand() % size));
    }
    return v;
}

void BM_Complexity_O_N(benchmark::State &state)
{
    auto v = ConstructRandomVector(state.range(0));
    // Test worst case scenario (item not in vector)
    const int64_t item_not_in_vector = state.range(0) * 2;
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
    }
    state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) -> double { return static_cast<double>(n); });
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();

const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";

@ -136,39 +128,31 @@ const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);

// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //

static void BM_Complexity_O_N_log_N(benchmark::State &state)
{
    auto v = ConstructRandomVector(state.range(0));
    for (auto _ : state)
    {
        std::sort(v.begin(), v.end());
    }
    state.SetComplexityN(state.range(0));
}
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); });
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();

const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";

@ -177,37 +161,36 @@ const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);

// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);

// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //

void BM_ComplexityCaptureArgs(benchmark::State &state, int n)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    state.SetComplexityN(n);
}

BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)->Complexity(benchmark::oN)->Ranges({{1, 2}, {3, 4}});

const std::string complexity_capture_name = "BM_ComplexityCaptureArgs/capture_test";

ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", complexity_capture_name + "_RMS", "N");

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char *argv[])
{
    RunOutputTests(argc, argv);
}
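// NOTE (editorial): the pattern under test here is that a benchmark which
// calls state.SetComplexityN(...) and is registered with ->Complexity(...)
// produces two extra aggregate rows whose names carry the _BigO and _RMS
// suffixes matched above. A minimal sketch, assuming <algorithm>, <vector>
// and benchmark/benchmark.h; BM_Search is illustrative, not part of the diff:
static void BM_Search(benchmark::State &state)
{
    std::vector<int> v(state.range(0), 1);
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(std::find(v.begin(), v.end(), 0)); // worst case: not found
    }
    state.SetComplexityN(state.range(0)); // the N used for the curve fit
}
// Emits BM_Search_BigO and BM_Search_RMS aggregates alongside the raw runs.
BENCHMARK(BM_Search)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oN);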
@ -12,49 +12,55 @@
#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
#endif

void BM_empty(benchmark::State &state)
{
    while (state.KeepRunning())
    {
        volatile benchmark::IterationCount x = state.iterations();
        ((void)x);
    }
}
BENCHMARK(BM_empty);

// The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State &state)
{
    assert((state.range(0) == 1 && state.range(1) == 2) || (state.range(0) == 5 && state.range(1) == 6));
    while (state.KeepRunning())
    {
    }
}
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);

template <class T, class U> void BM_template2(benchmark::State &state)
{
    BM_empty(state);
}
BENCHMARK_TEMPLATE2(BM_template2, int, long);

template <class T> void BM_template1(benchmark::State &state)
{
    BM_empty(state);
}
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);

template <class T> struct BM_Fixture : public ::benchmark::Fixture
{
};

BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State &state)
{
    BM_empty(state);
}
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State &state)
{
    BM_empty(state);
}

void BM_counters(benchmark::State &state)
{
    BM_empty(state);
    state.counters["Foo"] = 2;
}
@ -17,64 +17,80 @@
#define TEST_HAS_NO_EXCEPTIONS
#endif

void TestHandler()
{
#ifndef TEST_HAS_NO_EXCEPTIONS
    throw std::logic_error("");
#else
    std::abort();
#endif
}

void try_invalid_pause_resume(benchmark::State &state)
{
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
    try
    {
        state.PauseTiming();
        std::abort();
    }
    catch (std::logic_error const &)
    {
    }
    try
    {
        state.ResumeTiming();
        std::abort();
    }
    catch (std::logic_error const &)
    {
    }
#else
    (void)state; // avoid unused warning
#endif
}

void BM_diagnostic_test(benchmark::State &state)
{
    static bool called_once = false;

    if (called_once == false)
        try_invalid_pause_resume(state);

    for (auto _ : state)
    {
        benchmark::DoNotOptimize(state.iterations());
    }

    if (called_once == false)
        try_invalid_pause_resume(state);

    called_once = true;
}
BENCHMARK(BM_diagnostic_test);

void BM_diagnostic_test_keep_running(benchmark::State &state)
{
    static bool called_once = false;

    if (called_once == false)
        try_invalid_pause_resume(state);

    while (state.KeepRunning())
    {
        benchmark::DoNotOptimize(state.iterations());
    }

    if (called_once == false)
        try_invalid_pause_resume(state);

    called_once = true;
}
BENCHMARK(BM_diagnostic_test_keep_running);

int main(int argc, char *argv[])
{
    benchmark::internal::GetAbortHandler() = &TestHandler;
    benchmark::Initialize(&argc, argv);
    benchmark::RunSpecifiedBenchmarks();
}
@ -10,34 +10,36 @@
// reporter in the presence of DisplayAggregatesOnly().
// We do not care about console output, the normal tests check that already.

void BM_SummaryRepeat(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();

int main(int argc, char *argv[])
{
    const std::string output = GetFileReporterOutput(argc, argv);

    if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
    {
        std::cout << "Precondition mismatch. Expected to only find 6 "
                     "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
                     "\"name\": \"BM_SummaryRepeat/repeats:3\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
                     "output:\n";
        std::cout << output;
        return 1;
    }

    return 0;
}
@ -4,160 +4,181 @@
#pragma clang diagnostic ignored "-Wreturn-type"
#endif

extern "C"
{
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;

inline int Add42(int x)
{
    return x + 42;
}

struct NotTriviallyCopyable
{
    NotTriviallyCopyable();
    explicit NotTriviallyCopyable(int x) : value(x)
    {
    }
    NotTriviallyCopyable(NotTriviallyCopyable const &);
    int value;
};

struct Large
{
    int value;
    int data[2];
};
}

// CHECK-LABEL: test_with_rvalue:
extern "C" void test_with_rvalue()
{
    benchmark::DoNotOptimize(Add42(0));
    // CHECK: movl $42, %eax
    // CHECK: ret
}

// CHECK-LABEL: test_with_large_rvalue:
extern "C" void test_with_large_rvalue()
{
    benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
    // CHECK: ExternInt(%rip)
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
    // CHECK: ret
}

// CHECK-LABEL: test_with_non_trivial_rvalue:
extern "C" void test_with_non_trivial_rvalue()
{
    benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
    // CHECK: mov{{l|q}} ExternInt(%rip)
    // CHECK: ret
}

// CHECK-LABEL: test_with_lvalue:
extern "C" void test_with_lvalue()
{
    int x = 101;
    benchmark::DoNotOptimize(x);
    // CHECK-GNU: movl $101, %eax
    // CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
    // CHECK: ret
}

// CHECK-LABEL: test_with_large_lvalue:
extern "C" void test_with_large_lvalue()
{
    Large L{ExternInt, {ExternInt, ExternInt}};
    benchmark::DoNotOptimize(L);
    // CHECK: ExternInt(%rip)
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
    // CHECK: ret
}

// CHECK-LABEL: test_with_non_trivial_lvalue:
extern "C" void test_with_non_trivial_lvalue()
{
    NotTriviallyCopyable NTC(ExternInt);
    benchmark::DoNotOptimize(NTC);
    // CHECK: ExternInt(%rip)
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
    // CHECK: ret
}

// CHECK-LABEL: test_with_const_lvalue:
extern "C" void test_with_const_lvalue()
{
    const int x = 123;
    benchmark::DoNotOptimize(x);
    // CHECK: movl $123, %eax
    // CHECK: ret
}

// CHECK-LABEL: test_with_large_const_lvalue:
extern "C" void test_with_large_const_lvalue()
{
    const Large L{ExternInt, {ExternInt, ExternInt}};
    benchmark::DoNotOptimize(L);
    // CHECK: ExternInt(%rip)
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
    // CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
    // CHECK: ret
}

// CHECK-LABEL: test_with_non_trivial_const_lvalue:
extern "C" void test_with_non_trivial_const_lvalue()
{
    const NotTriviallyCopyable Obj(ExternInt);
    benchmark::DoNotOptimize(Obj);
    // CHECK: mov{{q|l}} ExternInt(%rip)
    // CHECK: ret
}

// CHECK-LABEL: test_div_by_two:
extern "C" int test_div_by_two(int input)
{
    int divisor = 2;
    benchmark::DoNotOptimize(divisor);
    return input / divisor;
    // CHECK: movl $2, [[DEST:.*]]
    // CHECK: idivl [[DEST]]
    // CHECK: ret
}

// CHECK-LABEL: test_inc_integer:
extern "C" int test_inc_integer()
{
    int x = 0;
    for (int i = 0; i < 5; ++i)
        benchmark::DoNotOptimize(++x);
    // CHECK: movl $1, [[DEST:.*]]
    // CHECK: {{(addl \$1,|incl)}} [[DEST]]
    // CHECK: {{(addl \$1,|incl)}} [[DEST]]
    // CHECK: {{(addl \$1,|incl)}} [[DEST]]
    // CHECK: {{(addl \$1,|incl)}} [[DEST]]
    // CHECK-CLANG: movl [[DEST]], %eax
    // CHECK: ret
    return x;
}

// CHECK-LABEL: test_pointer_rvalue
extern "C" void test_pointer_rvalue()
{
    // CHECK: movl $42, [[DEST:.*]]
    // CHECK: leaq [[DEST]], %rax
    // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
    // CHECK: ret
    int x = 42;
    benchmark::DoNotOptimize(&x);
}

// CHECK-LABEL: test_pointer_const_lvalue:
extern "C" void test_pointer_const_lvalue()
{
    // CHECK: movl $42, [[DEST:.*]]
    // CHECK: leaq [[DEST]], %rax
    // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
    // CHECK: ret
    int x = 42;
    int *const xp = &x;
    benchmark::DoNotOptimize(xp);
}

// CHECK-LABEL: test_pointer_lvalue:
extern "C" void test_pointer_lvalue()
{
    // CHECK: movl $42, [[DEST:.*]]
    // CHECK: leaq [[DEST]], %rax
    // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
    // CHECK: ret
    int x = 42;
    int *xp = &x;
    benchmark::DoNotOptimize(xp);
}
@ -2,51 +2,61 @@
#include <cstdint>

namespace
{
#if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif
std::uint64_t double_up(const std::uint64_t x)
{
    return x * 2;
}
} // namespace

// Using DoNotOptimize on types like BitRef seems to cause a lot of problems
// with the inline assembly on both GCC and Clang.
struct BitRef
{
    int index;
    unsigned char &byte;

  public:
    static BitRef Make()
    {
        static unsigned char arr[2] = {};
        BitRef b(1, arr[0]);
        return b;
    }

  private:
    BitRef(int i, unsigned char &b) : index(i), byte(b)
    {
    }
};

int main(int, char *[])
{
    // this test verifies compilation of DoNotOptimize() for some types

    char buffer8[8] = "";
    benchmark::DoNotOptimize(buffer8);

    char buffer20[20] = "";
    benchmark::DoNotOptimize(buffer20);

    char buffer1024[1024] = "";
    benchmark::DoNotOptimize(buffer1024);
    benchmark::DoNotOptimize(&buffer1024[0]);

    int x = 123;
    benchmark::DoNotOptimize(x);
    benchmark::DoNotOptimize(&x);
    benchmark::DoNotOptimize(x += 42);

    benchmark::DoNotOptimize(double_up(x));

    // These tests are to ensure DoNotOptimize accepts awkward
    // reference-holding types like BitRef.
    benchmark::DoNotOptimize(BitRef::Make());
    BitRef lval = BitRef::Make();
    benchmark::DoNotOptimize(lval);
}
@ -10,95 +10,116 @@
#include <sstream>
#include <string>

namespace
{

class TestReporter : public benchmark::ConsoleReporter
{
  public:
    virtual bool ReportContext(const Context &context)
    {
        return ConsoleReporter::ReportContext(context);
    };

    virtual void ReportRuns(const std::vector<Run> &report)
    {
        ++count_;
        ConsoleReporter::ReportRuns(report);
    };

    TestReporter() : count_(0)
    {
    }

    virtual ~TestReporter()
    {
    }

    size_t GetCount() const
    {
        return count_;
    }

  private:
    mutable size_t count_;
};

} // end namespace

static void NoPrefix(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(NoPrefix);

static void BM_Foo(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_Foo);

static void BM_Bar(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_Bar);

static void BM_FooBar(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_FooBar);

static void BM_FooBa(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_FooBa);

int main(int argc, char **argv)
{
    bool list_only = false;
    for (int i = 0; i < argc; ++i)
        list_only |= std::string(argv[i]).find("--benchmark_list_tests") != std::string::npos;

    benchmark::Initialize(&argc, argv);

    TestReporter test_reporter;
    const size_t returned_count = benchmark::RunSpecifiedBenchmarks(&test_reporter);

    if (argc == 2)
    {
        // Make sure we ran all of the tests
        std::stringstream ss(argv[1]);
        size_t expected_return;
        ss >> expected_return;

        if (returned_count != expected_return)
        {
            std::cerr << "ERROR: Expected " << expected_return
                      << " tests to match the filter but returned_count = " << returned_count << std::endl;
            return -1;
        }

        const size_t expected_reports = list_only ? 0 : expected_return;
        const size_t reports_count = test_reporter.GetCount();
        if (reports_count != expected_reports)
        {
            std::cerr << "ERROR: Expected " << expected_reports
                      << " tests to be run but reported_count = " << reports_count << std::endl;
            return -1;
        }
    }
    return 0;
}
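// NOTE (editorial): the five near-identical benchmarks above differ only by
// name because the harness runs this binary repeatedly under different
// --benchmark_filter regexes and passes the expected match count as argv[1].
// An illustrative invocation (the exact regexes and counts live in the
// project's test rules, not in this diff); the "BM_Foo" regex matches
// BM_Foo, BM_FooBar, and BM_FooBa, hence 3:
//
//     ./filter_test --benchmark_filter=BM_Foo 3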
@ -4,44 +4,57 @@
#include <cassert>
#include <memory>

class MyFixture : public ::benchmark::Fixture
{
  public:
    void SetUp(const ::benchmark::State &state)
    {
        if (state.thread_index == 0)
        {
            assert(data.get() == nullptr);
            data.reset(new int(42));
        }
    }

    void TearDown(const ::benchmark::State &state)
    {
        if (state.thread_index == 0)
        {
            assert(data.get() != nullptr);
            data.reset();
        }
    }

    ~MyFixture()
    {
        assert(data == nullptr);
    }

    std::unique_ptr<int> data;
};

BENCHMARK_F(MyFixture, Foo)(benchmark::State &st)
{
    assert(data.get() != nullptr);
    assert(*data == 42);
    for (auto _ : st)
    {
    }
}

BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State &st)
{
    if (st.thread_index == 0)
    {
        assert(data.get() != nullptr);
        assert(*data == 42);
    }
    for (auto _ : st)
    {
        assert(data.get() != nullptr);
        assert(*data == 42);
    }
    st.SetItemsProcessed(st.range(0));
}
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
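// NOTE (editorial): the two registration styles above are the point of this
// file. BENCHMARK_F defines and registers a fixture benchmark in one step,
// while the BENCHMARK_DEFINE_F / BENCHMARK_REGISTER_F pair splits definition
// from registration so that modifiers such as ->Arg(42)->ThreadPerCpu() can
// be attached. SetUp and TearDown bracket each run, which is why only
// thread_index 0 touches the shared data in the multi-threaded case.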
@ -1,29 +1,28 @@
#undef NDEBUG #undef NDEBUG
#include <chrono>
#include <thread>
#include "../src/timers.h" #include "../src/timers.h"
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "output_test.h" #include "output_test.h"
#include <chrono>
#include <thread>
static const std::chrono::duration<double, std::milli> time_frame(50); static const std::chrono::duration<double, std::milli> time_frame(50);
static const double time_frame_in_sec( static const double time_frame_in_sec(
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>( std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(time_frame).count());
time_frame)
.count());
void MyBusySpinwait() { void MyBusySpinwait()
const auto start = benchmark::ChronoClockNow(); {
const auto start = benchmark::ChronoClockNow();
while (true) { while (true)
const auto now = benchmark::ChronoClockNow(); {
const auto elapsed = now - start; const auto now = benchmark::ChronoClockNow();
const auto elapsed = now - start;
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >= if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >= time_frame)
time_frame) return;
return; }
}
} }
// ========================================================================= // // ========================================================================= //
@ -33,152 +32,92 @@ void MyBusySpinwait() {
// ========================================================================= // // ========================================================================= //
// BM_MainThread // BM_MainThread
void BM_MainThread(benchmark::State& state) { void BM_MainThread(benchmark::State &state)
for (auto _ : state) { {
MyBusySpinwait(); for (auto _ : state)
state.SetIterationTime(time_frame_in_sec); {
} MyBusySpinwait();
state.counters["invtime"] = state.SetIterationTime(time_frame_in_sec);
benchmark::Counter{1, benchmark::Counter::kIsRate}; }
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
} }
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= // // ========================================================================= //
// BM_WorkerThread // BM_WorkerThread
void BM_WorkerThread(benchmark::State& state) { void BM_WorkerThread(benchmark::State &state)
for (auto _ : state) { {
std::thread Worker(&MyBusySpinwait); for (auto _ : state)
Worker.join(); {
state.SetIterationTime(time_frame_in_sec); std::thread Worker(&MyBusySpinwait);
} Worker.join();
state.counters["invtime"] = state.SetIterationTime(time_frame_in_sec);
benchmark::Counter{1, benchmark::Counter::kIsRate}; }
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
} }
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= // // ========================================================================= //
// BM_MainThreadAndWorkerThread // BM_MainThreadAndWorkerThread
void BM_MainThreadAndWorkerThread(benchmark::State& state) { void BM_MainThreadAndWorkerThread(benchmark::State &state)
for (auto _ : state) { {
std::thread Worker(&MyBusySpinwait); for (auto _ : state)
MyBusySpinwait(); {
Worker.join(); std::thread Worker(&MyBusySpinwait);
state.SetIterationTime(time_frame_in_sec); MyBusySpinwait();
} Worker.join();
state.counters["invtime"] = state.SetIterationTime(time_frame_in_sec);
benchmark::Counter{1, benchmark::Counter::kIsRate}; }
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
} }
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
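
// For orientation on the UseManualTime() variants registered above, here is a
// minimal hedged sketch of manual timing (the chrono-based timer and the
// benchmark body are illustrative, not part of this diff; assumes <chrono>
// and "benchmark/benchmark.h" are included):
static void BM_ManualTiming(benchmark::State &state)
{
    for (auto _ : state)
    {
        auto start = std::chrono::high_resolution_clock::now();
        // ... the work being measured would go here ...
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> elapsed = end - start;
        // With ->UseManualTime(), this value replaces the framework's own
        // timer, exactly as state.SetIterationTime(time_frame_in_sec) does in
        // the test cases here.
        state.SetIterationTime(elapsed.count());
    }
}
BENCHMARK(BM_ManualTiming)->UseManualTime();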
// ========================================================================= //
// ---------------------------- TEST CASES END ----------------------------- //
// ========================================================================= //

int main(int argc, char *argv[])
{
    RunOutputTests(argc, argv);
}

View File

@ -1,8 +1,10 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
void BM_empty(benchmark::State& state) { void BM_empty(benchmark::State &state)
for (auto _ : state) { {
benchmark::DoNotOptimize(state.iterations()); for (auto _ : state)
} {
benchmark::DoNotOptimize(state.iterations());
}
} }
BENCHMARK(BM_empty); BENCHMARK(BM_empty);

View File

@ -3,54 +3,68 @@
#include <cstdlib>
#include <map>

namespace
{

std::map<int, int> ConstructRandomMap(int size)
{
    std::map<int, int> m;
    for (int i = 0; i < size; ++i)
    {
        m.insert(std::make_pair(std::rand() % size, std::rand() % size));
    }
    return m;
}

} // namespace

// Basic version.
static void BM_MapLookup(benchmark::State &state)
{
    const int size = static_cast<int>(state.range(0));
    std::map<int, int> m;
    for (auto _ : state)
    {
        state.PauseTiming();
        m = ConstructRandomMap(size);
        state.ResumeTiming();
        for (int i = 0; i < size; ++i)
        {
            benchmark::DoNotOptimize(m.find(std::rand() % size));
        }
    }
    state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);

// Using fixtures.
class MapFixture : public ::benchmark::Fixture
{
  public:
    void SetUp(const ::benchmark::State &st)
    {
        m = ConstructRandomMap(static_cast<int>(st.range(0)));
    }

    void TearDown(const ::benchmark::State &)
    {
        m.clear();
    }

    std::map<int, int> m;
};

BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State &state)
{
    const int size = static_cast<int>(state.range(0));
    for (auto _ : state)
    {
        for (int i = 0; i < size; ++i)
        {
            benchmark::DoNotOptimize(m.find(std::rand() % size));
        }
    }
    state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
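
// A hedged aside on the SetItemsProcessed calls above (illustrative numbers,
// not from this diff): the accumulated count feeds the items-per-second rate
// in the report. For example, at state.range(0) = 1 << 12 = 4096 lookups per
// iteration, a run that completes 250 iterations reports
//   items_processed = 250 * 4096 = 1024000
// which the reporter divides by the measured time into an items/s figure.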

View File

@ -4,18 +4,24 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "output_test.h" #include "output_test.h"
class TestMemoryManager : public benchmark::MemoryManager { class TestMemoryManager : public benchmark::MemoryManager
void Start() {} {
void Stop(Result* result) { void Start()
result->num_allocs = 42; {
result->max_bytes_used = 42000; }
} void Stop(Result *result)
{
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
}; };
void BM_empty(benchmark::State& state) { void BM_empty(benchmark::State &state)
for (auto _ : state) { {
benchmark::DoNotOptimize(state.iterations()); for (auto _ : state)
} {
benchmark::DoNotOptimize(state.iterations());
}
} }
BENCHMARK(BM_empty); BENCHMARK(BM_empty);
@ -35,10 +41,11 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"}", MR_Next}}); {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager()); {
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
benchmark::RegisterMemoryManager(mm.get()); benchmark::RegisterMemoryManager(mm.get());
RunOutputTests(argc, argv); RunOutputTests(argc, argv);
benchmark::RegisterMemoryManager(nullptr); benchmark::RegisterMemoryManager(nullptr);
} }
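
// For orientation, a hedged sketch of the call sequence the framework drives
// for the manager registered above (the ordering is the interface's contract;
// the surrounding pseudocode is illustrative, not part of this diff):
//
//     manager->Start();       // invoked before the measured benchmark run
//     ... run BM_empty ...
//     manager->Stop(&result); // invoked after; num_allocs and max_bytes_used
//                             // are attached to the run that reporters print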

View File

@ -5,72 +5,84 @@
#include <set>
#include <vector>

class MultipleRangesFixture : public ::benchmark::Fixture
{
  public:
    MultipleRangesFixture()
        : expectedValues({{1, 3, 5},
                          {1, 3, 8},
                          {1, 3, 15},
                          {2, 3, 5},
                          {2, 3, 8},
                          {2, 3, 15},
                          {1, 4, 5},
                          {1, 4, 8},
                          {1, 4, 15},
                          {2, 4, 5},
                          {2, 4, 8},
                          {2, 4, 15},
                          {1, 7, 5},
                          {1, 7, 8},
                          {1, 7, 15},
                          {2, 7, 5},
                          {2, 7, 8},
                          {2, 7, 15},
                          {7, 6, 3}})
    {
    }

    void SetUp(const ::benchmark::State &state)
    {
        std::vector<int64_t> ranges = {state.range(0), state.range(1), state.range(2)};
        assert(expectedValues.find(ranges) != expectedValues.end());
        actualValues.insert(ranges);
    }

    // NOTE: This is not TearDown as we want to check after _all_ runs are
    // complete.
    virtual ~MultipleRangesFixture()
    {
        if (actualValues != expectedValues)
        {
            std::cout << "EXPECTED\n";
            for (auto v : expectedValues)
            {
                std::cout << "{";
                for (int64_t iv : v)
                {
                    std::cout << iv << ", ";
                }
                std::cout << "}\n";
            }
            std::cout << "ACTUAL\n";
            for (auto v : actualValues)
            {
                std::cout << "{";
                for (int64_t iv : v)
                {
                    std::cout << iv << ", ";
                }
                std::cout << "}\n";
            }
        }
    }

    std::set<std::vector<int64_t>> expectedValues;
    std::set<std::vector<int64_t>> actualValues;
};

BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State &state)
{
    for (auto _ : state)
    {
        int64_t product = state.range(0) * state.range(1) * state.range(2);
        for (int64_t x = 0; x < product; x++)
        {
            benchmark::DoNotOptimize(x);
        }
    }
}

BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
@ -78,18 +90,22 @@ BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
    ->Ranges({{1, 2}, {3, 7}, {5, 15}})
    ->Args({7, 6, 3});

void BM_CheckDefaultArgument(benchmark::State &state)
{
    // Test that the 'range()' without an argument is the same as 'range(0)'.
    assert(state.range() == state.range(0));
    assert(state.range() != state.range(1));
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});

static void BM_MultipleRanges(benchmark::State &st)
{
    for (auto _ : st)
    {
    }
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
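
// To make the expectedValues set above concrete, a small standalone sketch
// (compile separately; it assumes the ->RangeMultiplier(2) that the fixture's
// values imply, and the helper name Expand is hypothetical):
#include <cstdint>
#include <iostream>
#include <vector>

// Expand {lo, hi} the way Ranges does under multiplier 2: lo, the powers of
// two strictly between lo and hi, then hi itself.
static std::vector<int64_t> Expand(int64_t lo, int64_t hi)
{
    std::vector<int64_t> out{lo};
    for (int64_t v = 1; v < hi; v *= 2)
    {
        if (v > lo)
            out.push_back(v);
    }
    if (out.back() != hi)
        out.push_back(hi);
    return out;
}

int main()
{
    // Prints the 18-tuple cartesian product {1,2} x {3,4,7} x {5,8,15};
    // together with the explicit ->Args({7, 6, 3}) tuple this is exactly the
    // 19-entry expectedValues set in the fixture above.
    for (int64_t a : Expand(1, 2))
        for (int64_t b : Expand(3, 7))
            for (int64_t c : Expand(5, 15))
                std::cout << "{" << a << ", " << b << ", " << c << "}\n";
}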

View File

@ -7,17 +7,20 @@
#endif
#include <cassert>

void BM_basic(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}

void BM_basic_slow(benchmark::State &state)
{
    std::chrono::milliseconds sleep_duration(state.range(0));
    for (auto _ : state)
    {
        std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
    }
}

BENCHMARK(BM_basic);
@ -37,8 +40,7 @@ BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
BENCHMARK(BM_basic)
    ->RangeMultiplier(std::numeric_limits<int>::max())
    ->Range(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max());

// Negative ranges
BENCHMARK(BM_basic)->Range(-64, -1);
@ -46,29 +48,31 @@ BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});

void CustomArgs(benchmark::internal::Benchmark *b)
{
    for (int i = 0; i < 10; ++i)
    {
        b->Arg(i);
    }
}

BENCHMARK(BM_basic)->Apply(CustomArgs);

void BM_explicit_iteration_count(benchmark::State &state)
{
    // Test that benchmarks specified with an explicit iteration count are
    // only run once.
    static bool invoked_before = false;
    assert(!invoked_before);
    invoked_before = true;

    // Test that the requested iteration count is respected.
    assert(state.max_iterations == 42);
    size_t actual_iterations = 0;
    for (auto _ : state)
        ++actual_iterations;
    assert(state.iterations() == state.max_iterations);
    assert(state.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);

View File

@ -18,34 +18,36 @@
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)

enum MatchRules
{
    MR_Default, // Skip non-matching lines until a match is found.
    MR_Next,    // Match must occur on the next line.
    MR_Not      // No line between the current position and the next match matches
                // the regex
};

struct TestCase
{
    TestCase(std::string re, int rule = MR_Default);

    std::string regex_str;
    int match_rule;
    std::string substituted_regex;
    std::shared_ptr<benchmark::Regex> regex;
};

enum TestCaseID
{
    TC_ConsoleOut,
    TC_ConsoleErr,
    TC_JSONOut,
    TC_JSONErr,
    TC_CSVOut,
    TC_CSVErr,

    TC_NumID // PRIVATE
};
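
// A hedged usage sketch for the three match rules above (the regexes are
// illustrative, not from this diff): MR_Default scans forward to the first
// matching line, MR_Next pins the match to the immediately following line,
// and MR_Not forbids a match before the next positive case is found.
//
//     ADD_CASES(TC_ConsoleOut, {{"^BM_first %console_report$"},
//                               {"^BM_second %console_report$", MR_Next},
//                               {"unexpected output", MR_Not}});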
// Add a list of test cases to be run against the output specified by
@ -54,18 +56,17 @@ int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regex's
// See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il);

// Run all output tests.
void RunOutputTests(int argc, char *argv[]);

// Count the number of 'pat' substrings in the 'haystack' string.
int SubstrCnt(const std::string &haystack, const std::string &pat);

// Run registered benchmarks with file reporter enabled, and return the content
// outputted by the file reporter.
std::string GetFileReporterOutput(int argc, char *argv[]);
// ========================================================================= //
// ------------------------- Results checking ------------------------------ //
@ -79,77 +80,87 @@ std::string GetFileReporterOutput(int argc, char* argv[]);
// all the benchmark names. Matching benchmarks
// will be the subject of a call to checker_function
// checker_function: should be of type ResultsCheckFn (see below)
#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function)                                                    \
    size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)

struct Results;
typedef std::function<void(Results const &)> ResultsCheckFn;

size_t AddChecker(const char *bm_name_pattern, ResultsCheckFn fn);

// Class holding the results of a benchmark.
// It is passed in calls to checker functions.
struct Results
{
    // the benchmark name
    std::string name;

    // the benchmark fields
    std::map<std::string, std::string> values;

    Results(const std::string &n) : name(n)
    {
    }

    int NumThreads() const;

    double NumIterations() const;

    typedef enum
    {
        kCpuTime,
        kRealTime
    } BenchmarkTime;

    // get cpu_time or real_time in seconds
    double GetTime(BenchmarkTime which) const;

    // get the real_time duration of the benchmark in seconds.
    // it is better to use fuzzy float checks for this, as the float
    // ASCII formatting is lossy.
    double DurationRealTime() const
    {
        return NumIterations() * GetTime(kRealTime);
    }
    // get the cpu_time duration of the benchmark in seconds
    double DurationCPUTime() const
    {
        return NumIterations() * GetTime(kCpuTime);
    }

    // get the string for a result by name, or nullptr if the name
    // is not found
    const std::string *Get(const char *entry_name) const
    {
        auto it = values.find(entry_name);
        if (it == values.end())
            return nullptr;
        return &it->second;
    }

    // get a result by name, parsed as a specific type.
    // NOTE: for counters, use GetCounterAs instead.
    template <class T> T GetAs(const char *entry_name) const;

    // counters are written as doubles, so they have to be read first
    // as a double, and only then converted to the asked type.
    template <class T> T GetCounterAs(const char *entry_name) const
    {
        double dval = GetAs<double>(entry_name);
        T tval = static_cast<T>(dval);
        return tval;
    }
};

template <class T> T Results::GetAs(const char *entry_name) const
{
    auto *sv = Get(entry_name);
    CHECK(sv != nullptr && !sv->empty());
    std::stringstream ss;
    ss << *sv;
    T out;
    ss >> out;
    CHECK(!ss.fail());
    return out;
}
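
// A hedged sketch of how these accessors are consumed from a test file (the
// benchmark name and counter are illustrative; the pattern mirrors the
// CHECK_BENCHMARK_RESULTS macro defined above):
//
//     void CheckMyBench(Results const &e)
//     {
//         CHECK(e.NumIterations() > 0);
//         CHECK(e.GetTime(Results::kRealTime) >= 0.0);
//         double foo = e.GetCounterAs<double>("foo"); // double first, then cast
//         CHECK(foo >= 0.0);
//     }
//     CHECK_BENCHMARK_RESULTS("BM_MyBench", &CheckMyBench);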
//----------------------------------
@ -204,10 +215,11 @@ T Results::GetAs(const char* entry_name) const {
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //

namespace
{

const char *const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";

} // end namespace

#endif // TEST_OUTPUT_TEST_H

View File

@ -9,15 +9,17 @@
#include <streambuf>

#include "../src/benchmark_api_internal.h"
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h"    // NOTE: re.h is for internal use only
#include "output_test.h"

// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
// ========================================================================= //
namespace internal
{
namespace
{

using TestCaseList = std::vector<TestCase>;
@ -28,16 +30,18 @@ using TestCaseList = std::vector<TestCase>;
// Substitute("%HelloWorld") // Always expands to Hello. // Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>; using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList& GetTestCaseList(TestCaseID ID) { TestCaseList &GetTestCaseList(TestCaseID ID)
// Uses function-local statics to ensure initialization occurs {
// before first use. // Uses function-local statics to ensure initialization occurs
static TestCaseList lists[TC_NumID]; // before first use.
return lists[ID]; static TestCaseList lists[TC_NumID];
return lists[ID];
} }
SubMap& GetSubstitutions() { SubMap &GetSubstitutions()
// Don't use 'dec_re' from header because it may not yet be initialized. {
// clang-format off // Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
static std::string time_re = "([0-9]+[.])?[0-9]+"; static std::string time_re = "([0-9]+[.])?[0-9]+";
static SubMap map = { static SubMap map = {
@ -65,271 +69,325 @@ SubMap& GetSubstitutions() {
"," + safe_dec_re + ",,,"}, "," + safe_dec_re + ",,,"},
{"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
{"%csv_label_report_end", ",,"}}; {"%csv_label_report_end", ",,"}};
// clang-format on // clang-format on
return map; return map;
} }
std::string PerformSubstitutions(std::string source)
{
    SubMap const &subs = GetSubstitutions();
    using SizeT = std::string::size_type;
    for (auto const &KV : subs)
    {
        SizeT pos;
        SizeT next_start = 0;
        while ((pos = source.find(KV.first, next_start)) != std::string::npos)
        {
            next_start = pos + KV.second.size();
            source.replace(pos, KV.first.size(), KV.second);
        }
    }
    return source;
}
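
// To make the pass above concrete, a hedged worked example (the expansion is
// abbreviated; the authoritative entries are in the map above):
//
//     "^\"BM_empty\",%csv_report$"
//       -> "^\"BM_empty\",[0-9]+," + safe_dec_re + ... + ",ns,,,$"
//
// Note that next_start is advanced past the replacement text, so a
// placeholder-like sequence inside a substituted value is never re-expanded.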
void CheckCase(std::stringstream &remaining_output, TestCase const &TC, TestCaseList const &not_checks)
{
    std::string first_line;
    bool on_first = true;
    std::string line;
    while (remaining_output.eof() == false)
    {
        CHECK(remaining_output.good());
        std::getline(remaining_output, line);
        if (on_first)
        {
            first_line = line;
            on_first = false;
        }
        for (const auto &NC : not_checks)
        {
            CHECK(!NC.regex->Match(line))
                << "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << NC.regex_str << "\""
                << "\n    actual regex string \"" << TC.substituted_regex << "\""
                << "\n    started matching near: " << first_line;
        }
        if (TC.regex->Match(line))
            return;
        CHECK(TC.match_rule != MR_Next) << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str << "\""
                                        << "\n    actual regex string \"" << TC.substituted_regex << "\""
                                        << "\n    started matching near: " << first_line;
    }
    CHECK(remaining_output.eof() == false)
        << "End of output reached before match for regex \"" << TC.regex_str << "\" was found"
        << "\n    actual regex string \"" << TC.substituted_regex << "\""
        << "\n    started matching near: " << first_line;
}
void CheckCases(TestCaseList const &checks, std::stringstream &output)
{
    std::vector<TestCase> not_checks;
    for (size_t i = 0; i < checks.size(); ++i)
    {
        const auto &TC = checks[i];
        if (TC.match_rule == MR_Not)
        {
            not_checks.push_back(TC);
            continue;
        }
        CheckCase(output, TC, not_checks);
        not_checks.clear();
    }
}
class TestReporter : public benchmark::BenchmarkReporter
{
  public:
    TestReporter(std::vector<benchmark::BenchmarkReporter *> reps) : reporters_(reps)
    {
    }

    virtual bool ReportContext(const Context &context)
    {
        bool last_ret = false;
        bool first = true;
        for (auto rep : reporters_)
        {
            bool new_ret = rep->ReportContext(context);
            CHECK(first || new_ret == last_ret) << "Reports return different values for ReportContext";
            first = false;
            last_ret = new_ret;
        }
        (void)first;
        return last_ret;
    }

    void ReportRuns(const std::vector<Run> &report)
    {
        for (auto rep : reporters_)
            rep->ReportRuns(report);
    }
    void Finalize()
    {
        for (auto rep : reporters_)
            rep->Finalize();
    }

  private:
    std::vector<benchmark::BenchmarkReporter *> reporters_;
};
} // namespace
} // end namespace internal
// ========================================================================= //
// -------------------------- Results checking ----------------------------- //
// ========================================================================= //

namespace internal
{

// Utility class to manage subscribers for checking benchmark results.
// It works by parsing the CSV output to read the results.
class ResultsChecker
{
  public:
    struct PatternAndFn : public TestCase
    { // reusing TestCase for its regexes
        PatternAndFn(const std::string &rx, ResultsCheckFn fn_) : TestCase(rx), fn(fn_)
        {
        }
        ResultsCheckFn fn;
    };

    std::vector<PatternAndFn> check_patterns;
    std::vector<Results> results;
    std::vector<std::string> field_names;

    void Add(const std::string &entry_pattern, ResultsCheckFn fn);

    void CheckResults(std::stringstream &output);

  private:
    void SetHeader_(const std::string &csv_header);
    void SetValues_(const std::string &entry_csv_line);

    std::vector<std::string> SplitCsv_(const std::string &line);
};

// store the static ResultsChecker in a function to prevent initialization
// order problems
ResultsChecker &GetResultsChecker()
{
    static ResultsChecker rc;
    return rc;
}
// add a results checker for a benchmark
void ResultsChecker::Add(const std::string &entry_pattern, ResultsCheckFn fn)
{
    check_patterns.emplace_back(entry_pattern, fn);
}
// check the results of all subscribed benchmarks
void ResultsChecker::CheckResults(std::stringstream &output)
{
    // first reset the stream to the start
    {
        auto start = std::stringstream::pos_type(0);
        // clear before calling tellg()
        output.clear();
        // seek to zero only when needed
        if (output.tellg() > start)
            output.seekg(start);
        // and just in case
        output.clear();
    }
    // now go over every line and publish it to the ResultsChecker
    std::string line;
    bool on_first = true;
    while (output.eof() == false)
    {
        CHECK(output.good());
        std::getline(output, line);
        if (on_first)
        {
            SetHeader_(line); // this is important
            on_first = false;
            continue;
        }
        SetValues_(line);
    }
    // finally we can call the subscribed check functions
    for (const auto &p : check_patterns)
    {
        VLOG(2) << "--------------------------------\n";
        VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
        for (const auto &r : results)
        {
            if (!p.regex->Match(r.name))
            {
                VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
                continue;
            }
            else
            {
                VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
            }
            VLOG(1) << "Checking results of " << r.name << ": ... \n";
            p.fn(r);
            VLOG(1) << "Checking results of " << r.name << ": OK.\n";
        }
    }
}
// prepare for the names in this header
void ResultsChecker::SetHeader_(const std::string &csv_header)
{
    field_names = SplitCsv_(csv_header);
}
// set the values for a benchmark
void ResultsChecker::SetValues_(const std::string &entry_csv_line)
{
    if (entry_csv_line.empty())
        return; // some lines are empty
    CHECK(!field_names.empty());
    auto vals = SplitCsv_(entry_csv_line);
    CHECK_EQ(vals.size(), field_names.size());
    results.emplace_back(vals[0]); // vals[0] is the benchmark name
    auto &entry = results.back();
    for (size_t i = 1, e = vals.size(); i < e; ++i)
    {
        entry.values[field_names[i]] = vals[i];
    }
}
// a quick'n'dirty csv splitter (eliminating quotes)
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string &line)
{
    std::vector<std::string> out;
    if (line.empty())
        return out;
    if (!field_names.empty())
        out.reserve(field_names.size());
    size_t prev = 0, pos = line.find_first_of(','), curr = pos;
    while (pos != line.npos)
    {
        CHECK(curr > 0);
        if (line[prev] == '"')
            ++prev;
        if (line[curr - 1] == '"')
            --curr;
        out.push_back(line.substr(prev, curr - prev));
        prev = pos + 1;
        pos = line.find_first_of(',', pos + 1);
        curr = pos;
    }
    curr = line.size();
    if (line[prev] == '"')
        ++prev;
    if (line[curr - 1] == '"')
        --curr;
    out.push_back(line.substr(prev, curr - prev));
    return out;
}
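
// A worked example for the splitter above (illustrative input):
//   SplitCsv_("\"BM_empty/repeats:3\",10,1.5,2.0") strips the quotes around
//   the first field and splits on commas, yielding
//   {"BM_empty/repeats:3", "10", "1.5", "2.0"}.
// Quoted fields must not themselves contain commas; the helper is
// deliberately quick'n'dirty, as the comment above says.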
} // end namespace internal
size_t AddChecker(const char *bm_name, ResultsCheckFn fn)
{
    auto &rc = internal::GetResultsChecker();
    rc.Add(bm_name, fn);
    return rc.results.size();
}
int Results::NumThreads() const
{
    auto pos = name.find("/threads:");
    if (pos == name.npos)
        return 1;
    auto end = name.find('/', pos + 9);
    std::stringstream ss;
    ss << name.substr(pos + 9, end);
    int num = 1;
    ss >> num;
    CHECK(!ss.fail());
    return num;
}
double Results::NumIterations() const
{
    return GetAs<double>("iterations");
}
double Results::GetTime(BenchmarkTime which) const
{
    CHECK(which == kCpuTime || which == kRealTime);
    const char *which_str = which == kCpuTime ? "cpu_time" : "real_time";
    double val = GetAs<double>(which_str);
    auto unit = Get("time_unit");
    CHECK(unit);
    if (*unit == "ns")
    {
        return val * 1.e-9;
    }
    else if (*unit == "us")
    {
        return val * 1.e-6;
    }
    else if (*unit == "ms")
    {
        return val * 1.e-3;
    }
    else if (*unit == "s")
    {
        return val;
    }
    else
    {
        CHECK(1 == 0) << "unknown time unit: " << *unit;
        return 0;
    }
}
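
// A worked example for the conversions above (illustrative numbers): a CSV
// row with iterations=100, real_time=2 and time_unit="ms" yields
//   GetTime(kRealTime)  = 2 * 1.e-3   = 0.002 s
//   DurationRealTime()  = 100 * 0.002 = 0.2 s
// which is why the header recommends fuzzy float comparisons: the values
// round-trip through lossy ASCII formatting.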
// ========================================================================= //
@ -337,40 +395,43 @@ double Results::GetTime(BenchmarkTime which) const {
// ========================================================================= //
TestCase::TestCase(std::string re, int rule)
    : regex_str(std::move(re)), match_rule(rule), substituted_regex(internal::PerformSubstitutions(regex_str)),
      regex(std::make_shared<benchmark::Regex>())
{
    std::string err_str;
    regex->Init(substituted_regex, &err_str);
    CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex << "\""
                           << "\n    originally \"" << regex_str << "\""
                           << "\n    got error: " << err_str;
}
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il)
{
    auto &L = internal::GetTestCaseList(ID);
    L.insert(L.end(), il);
    return 0;
}
int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il)
{
    auto &subs = internal::GetSubstitutions();
    for (auto KV : il)
    {
        bool exists = false;
        KV.second = internal::PerformSubstitutions(KV.second);
        for (auto &EKV : subs)
        {
            if (EKV.first == KV.first)
            {
                EKV.second = std::move(KV.second);
                exists = true;
                break;
            }
        }
        if (!exists)
            subs.push_back(std::move(KV));
    }
    return 0;
}
// Disable deprecated warnings temporarily because we need to reference
@ -379,137 +440,145 @@ int SetSubstitutions(
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
void RunOutputTests(int argc, char *argv[])
{
    using internal::GetTestCaseList;
    benchmark::Initialize(&argc, argv);
    auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
    benchmark::ConsoleReporter CR(options);
    benchmark::JSONReporter JR;
    benchmark::CSVReporter CSVR;
    struct ReporterTest
    {
        const char *name;
        std::vector<TestCase> &output_cases;
        std::vector<TestCase> &error_cases;
        benchmark::BenchmarkReporter &reporter;
        std::stringstream out_stream;
        std::stringstream err_stream;

        ReporterTest(const char *n, std::vector<TestCase> &out_tc, std::vector<TestCase> &err_tc,
                     benchmark::BenchmarkReporter &br)
            : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br)
        {
            reporter.SetOutputStream(&out_stream);
            reporter.SetErrorStream(&err_stream);
        }
    } TestCases[] = {
        {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), GetTestCaseList(TC_ConsoleErr), CR},
        {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), JR},
        {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), CSVR},
    };

    // Create the test reporter and run the benchmarks.
    std::cout << "Running benchmarks...\n";
    internal::TestReporter test_rep({&CR, &JR, &CSVR});
    benchmark::RunSpecifiedBenchmarks(&test_rep);

    for (auto &rep_test : TestCases)
    {
        std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
        std::string banner(msg.size() - 1, '-');
        std::cout << banner << msg << banner << "\n";

        std::cerr << rep_test.err_stream.str();
        std::cout << rep_test.out_stream.str();

        internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
        internal::CheckCases(rep_test.output_cases, rep_test.out_stream);

        std::cout << "\n";
    }

    // now that we know the output is as expected, we can dispatch
    // the checks to subscribers.
    auto &csv = TestCases[2];
    // would use == but gcc spits a warning
    CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
    internal::GetResultsChecker().CheckResults(csv.out_stream);
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
int SubstrCnt(const std::string &haystack, const std::string &pat)
{
    if (pat.length() == 0)
        return 0;
    int count = 0;
    for (size_t offset = haystack.find(pat); offset != std::string::npos;
         offset = haystack.find(pat, offset + pat.length()))
        ++count;
    return count;
}
static char ToHex(int ch)
{
    return ch < 10 ? static_cast<char>('0' + ch) : static_cast<char>('a' + (ch - 10));
}
static char RandomHexChar()
{
    static std::mt19937 rd{std::random_device{}()};
    static std::uniform_int_distribution<int> mrand{0, 15};
    return ToHex(mrand(rd));
}
static std::string GetRandomFileName()
{
    std::string model = "test.%%%%%%";
    for (auto &ch : model)
    {
        if (ch == '%')
            ch = RandomHexChar();
    }
    return model;
}
static bool FileExists(std::string const &name)
{
    std::ifstream in(name.c_str());
    return in.good();
}
static std::string GetTempFileName()
{
    // This function attempts to avoid race conditions where two tests
    // create the same file at the same time. However, it still introduces races
    // similar to tmpnam.
    int retries = 3;
    while (--retries)
    {
        std::string name = GetRandomFileName();
        if (!FileExists(name))
            return name;
    }
    std::cerr << "Failed to create unique temporary file name" << std::endl;
    std::abort();
}
std::string GetFileReporterOutput(int argc, char *argv[])
{
    std::vector<char *> new_argv(argv, argv + argc);
    assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());

    std::string tmp_file_name = GetTempFileName();
    std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';

    std::string tmp = "--benchmark_out=";
    tmp += tmp_file_name;
    new_argv.emplace_back(const_cast<char *>(tmp.c_str()));

    argc = int(new_argv.size());

    benchmark::Initialize(&argc, new_argv.data());
    benchmark::RunSpecifiedBenchmarks();

    // Read the output back from the file, and delete the file.
    std::ifstream tmp_stream(tmp_file_name);
    std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)), std::istreambuf_iterator<char>());
    std::remove(tmp_file_name.c_str());
    return output;
}

View File

@ -3,33 +3,41 @@
#include <cassert>
#include <vector>

#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"

namespace
{

class TestReporter : public benchmark::ConsoleReporter
{
  public:
    virtual void ReportRuns(const std::vector<Run> &report)
    {
        all_runs_.insert(all_runs_.end(), begin(report), end(report));
        ConsoleReporter::ReportRuns(report);
    }

    std::vector<Run> all_runs_;
};

struct TestCase
{
    std::string name;
    const char *label;
    // Note: not explicit as we rely on it being converted through ADD_CASES.
    TestCase(const char *xname) : TestCase(xname, nullptr)
    {
    }
    TestCase(const char *xname, const char *xlabel) : name(xname), label(xlabel)
    {
    }

    typedef benchmark::BenchmarkReporter::Run Run;

    void CheckRun(Run const &run) const
    {
        // clang-format off
        CHECK(name == run.benchmark_name()) << "expected " << name << " got "
                                            << run.benchmark_name();
        if (label) {
@ -38,37 +46,40 @@ struct TestCase {
        } else {
            CHECK(run.report_label == "");
        }
        // clang-format on
    }
};

std::vector<TestCase> ExpectedResults;

int AddCases(std::initializer_list<TestCase> const &v)
{
    for (auto N : v)
    {
        ExpectedResults.push_back(N);
    }
    return 0;
}

#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__})

} // end namespace

typedef benchmark::internal::Benchmark *ReturnVal;

//----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark("BM_function_manual_registration", BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});

//----------------------------------------------------------------------------//
@ -78,107 +89,118 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//

#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK

void BM_extra_args(benchmark::State &st, const char *label)
{
    for (auto _ : st)
    {
    }
    st.SetLabel(label);
}
int RegisterFromFunction()
{
    std::pair<const char *, const char *> cases[] = {{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
    for (auto const &c : cases)
        benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
    return 0;
}
int dummy2 = RegisterFromFunction();
ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});

#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------//

struct CustomFixture
{
    void operator()(benchmark::State &st)
    {
        for (auto _ : st)
        {
        }
    }
};

void TestRegistrationAtRuntime()
{
#ifdef BENCHMARK_HAS_CXX11
    {
        CustomFixture fx;
        benchmark::RegisterBenchmark("custom_fixture", fx);
        AddCases({"custom_fixture"});
    }
#endif
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
    {
        const char *x = "42";
        auto capturing_lam = [=](benchmark::State &st) {
            for (auto _ : st)
            {
            }
            st.SetLabel(x);
        };
        benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
        AddCases({{"lambda_benchmark", x}});
    }
#endif
}
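
// One hedged usage note on the runtime-registration API exercised above:
// RegisterBenchmark returns a Benchmark* (the ReturnVal typedef earlier in
// this file), so the usual chained configuration applies. The name and
// settings below are illustrative, not part of this diff:
//
//     benchmark::RegisterBenchmark("runtime_configured", BM_function)
//         ->Arg(8)
//         ->Threads(2)
//         ->UseRealTime();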
// Test that all benchmarks, registered either during static init or at
// runtime, are run and the results are passed to the reporter.
void RunTestOne()
{
    TestRegistrationAtRuntime();

    TestReporter test_reporter;
    benchmark::RunSpecifiedBenchmarks(&test_reporter);

    typedef benchmark::BenchmarkReporter::Run Run;
    auto EB = ExpectedResults.begin();

    for (Run const &run : test_reporter.all_runs_)
    {
        assert(EB != ExpectedResults.end());
        EB->CheckRun(run);
        ++EB;
    }
    assert(EB == ExpectedResults.end());
}
// Test that ClearRegisteredBenchmarks() clears all previously registered
// benchmarks.
// Also test that new benchmarks can be registered and run afterwards.
void RunTestTwo()
{
    assert(ExpectedResults.size() != 0 && "must have at least one registered benchmark");
    ExpectedResults.clear();
    benchmark::ClearRegisteredBenchmarks();

    TestReporter test_reporter;
    size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
    assert(num_ran == 0);
    assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());

    TestRegistrationAtRuntime();
    num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
    assert(num_ran == ExpectedResults.size());

    typedef benchmark::BenchmarkReporter::Run Run;
    auto EB = ExpectedResults.begin();

    for (Run const &run : test_reporter.all_runs_)
    {
        assert(EB != ExpectedResults.end());
        EB->CheckRun(run);
        ++EB;
    }
    assert(EB == ExpectedResults.end());
}
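// Aside: ClearRegisteredBenchmarks() enables the register/run/clear cycle
// tested above to drive several batches from one process. A sketch
// (assumed sizes and names; needs <string> and <vector>):
//
//     for (int size : {8, 64, 512})
//     {
//         benchmark::ClearRegisteredBenchmarks();
//         std::string name = "fill/" + std::to_string(size);
//         benchmark::RegisterBenchmark(name.c_str(), [size](benchmark::State &st) {
//             std::vector<int> v;
//             for (auto _ : st)
//                 v.assign(size, 42);
//         });
//         benchmark::RunSpecifiedBenchmarks();
//     }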
int main(int argc, char *argv[])
{
    benchmark::Initialize(&argc, argv);

    RunTestOne();
    RunTestTwo();
}
@ -10,30 +10,32 @@
// reporter in the presence of ReportAggregatesOnly().
// We do not care about console output, the normal tests check that already.

void BM_SummaryRepeat(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();

int main(int argc, char *argv[])
{
    const std::string output = GetFileReporterOutput(argc, argv);

    if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
        SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
    {
        std::cout << "Precondition mismatch. Expected to only find three "
                     "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
                     "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
                     "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
                     "output:\n";
        std::cout << output;
        return 1;
    }

    return 0;
}
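// Aside: SubstrCnt is a helper defined elsewhere in this test suite. A
// typical implementation counts non-overlapping occurrences of a substring
// (a sketch, not the actual definition used here):
//
//     size_t SubstrCnt(const std::string &haystack, const std::string &needle)
//     {
//         if (needle.empty())
//             return 0;
//         size_t count = 0;
//         for (size_t pos = haystack.find(needle); pos != std::string::npos;
//              pos = haystack.find(needle, pos + needle.size()))
//             ++count;
//         return count;
//     }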
@ -9,53 +9,49 @@
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //

ADD_CASES(TC_ConsoleOut,
          {{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, {"^[-]+$", MR_Next}});
static int AddContextCases()
{
    AddCases(TC_ConsoleErr, {
                                {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
                                {"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
                                {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
                            });
    AddCases(TC_JSONOut, {{"^\\{", MR_Default},
                          {"\"context\":", MR_Next},
                          {"\"date\": \"", MR_Next},
                          {"\"host_name\":", MR_Next},
                          {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", MR_Next},
                          {"\"num_cpus\": %int,$", MR_Next},
                          {"\"mhz_per_cpu\": %float,$", MR_Next},
                          {"\"cpu_scaling_enabled\": ", MR_Next},
                          {"\"caches\": \\[$", MR_Next}});
    auto const &Info = benchmark::CPUInfo::Get();
    auto const &Caches = Info.caches;
    if (!Caches.empty())
    {
        AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
    }
    for (size_t I = 0; I < Caches.size(); ++I)
    {
        std::string num_caches_str = Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
        AddCases(TC_ConsoleErr, {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, MR_Next}});
        AddCases(TC_JSONOut, {{"\\{$", MR_Next},
                              {"\"type\": \"", MR_Next},
                              {"\"level\": %int,$", MR_Next},
                              {"\"size\": %int,$", MR_Next},
                              {"\"num_sharing\": %int$", MR_Next},
                              {"}[,]{0,1}$", MR_Next}});
    }
    AddCases(TC_JSONOut, {{"],$"}});
    auto const &LoadAvg = Info.load_avg;
    if (!LoadAvg.empty())
    {
        AddCases(TC_ConsoleErr, {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
    }
    AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
    return 0;
}
int dummy_register = AddContextCases();
ADD_CASES(TC_CSVOut, {{"%csv_header"}});
@ -64,9 +60,11 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //

void BM_basic(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_basic);
@ -88,12 +86,14 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //

void BM_bytes_per_second(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
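// Aside: SetBytesProcessed() is what turns plain timings into the
// bytes_per_second counter checked below. A more realistic sketch (buffer
// size and names are made up; needs <cstring> and <vector>):
//
//     static void BM_memcpy(benchmark::State &state)
//     {
//         const size_t n = 4096;
//         std::vector<char> src(n, 'x'), dst(n);
//         for (auto _ : state)
//         {
//             std::memcpy(dst.data(), src.data(), n);
//             benchmark::DoNotOptimize(dst.data());
//         }
//         state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(n));
//     }
//     BENCHMARK(BM_memcpy);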
@ -117,12 +117,14 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //

void BM_items_per_second(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
@ -146,10 +148,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //

void BM_label(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    state.SetLabel("some label");
}
BENCHMARK(BM_label);
@ -173,10 +177,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //

void BM_error(benchmark::State &state)
{
    state.SkipWithError("message");
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
@ -196,9 +202,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// //
// ========================================================================= //

void BM_no_arg_name(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
@ -214,9 +222,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ------------------------ Testing Arg Name Output ----------------------- //
// ========================================================================= //

void BM_arg_name(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
@ -232,68 +242,70 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //

void BM_arg_names(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
                       {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});

// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //

void BM_BigArgs(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, {"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //

void BM_Complexity_O1(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
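// Aside: SetComplexityN() feeds the big-O fit whose output is checked
// below. A sketch of an O(N) case (names are illustrative; needs <vector>):
//
//     static void BM_LinearScan(benchmark::State &state)
//     {
//         std::vector<int> v(static_cast<size_t>(state.range(0)), 1);
//         for (auto _ : state)
//         {
//             int sum = 0;
//             for (int x : v)
//                 sum += x;
//             benchmark::DoNotOptimize(sum);
//         }
//         state.SetComplexityN(state.range(0));
//     }
//     BENCHMARK(BM_LinearScan)->Range(1 << 10, 1 << 18)->Complexity(benchmark::oN);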
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, {"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //

// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
// need two repetitions min to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
                          {"^BM_Repeat/repeats:2 %console_report$"},
                          {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
                          {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
                          {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
                       {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
@ -334,13 +346,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, mean and median is the same, so let's repeat.. // but for two repetitions, mean and median is the same, so let's repeat..
BENCHMARK(BM_Repeat)->Repetitions(3); BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"}, {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"}, {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
@ -388,14 +399,13 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// median differs between even/odd number of repetitions, so just to be sure // median differs between even/odd number of repetitions, so just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4); BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
{{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"}, {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"}, {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
@ -451,9 +461,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
@ -466,40 +478,39 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported // Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) { void BM_SummaryRepeat(benchmark::State &state)
for (auto _ : state) { {
} for (auto _ : state)
{
}
} }
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES( ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
TC_ConsoleOut, {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"}, {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"}, ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
ADD_CASES(TC_JSONOut, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, {"\"repetitions\": 3,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"aggregate_name\": \"mean\",$", MR_Next}, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, {"\"repetitions\": 3,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
{"\"aggregate_name\": \"median\",$", MR_Next}, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}, {"\"repetitions\": 3,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next}});
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"}, {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
@ -508,106 +519,99 @@ ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
// Test that non-aggregate data is not displayed.
// NOTE: this test is kinda bad. We are only testing the display output.
// But we don't check that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
                          {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
                          {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
                          {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
                       {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
                       {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"mean\",$", MR_Next},
                       {"\"iterations\": 2,$", MR_Next},
                       {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
                       {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"median\",$", MR_Next},
                       {"\"iterations\": 2,$", MR_Next},
                       {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
                       {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"stddev\",$", MR_Next},
                       {"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
                      {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
                      {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
                      {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_RepeatTimeUnit)->Repetitions(3)->ReportAggregatesOnly()->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
                          {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
                          {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
                           "]*3$"},
                          {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
                           "]*3$"}});
ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
                       {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
                       {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 3,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"mean\",$", MR_Next},
                       {"\"iterations\": 3,$", MR_Next},
                       {"\"time_unit\": \"us\",?$"},
                       {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
                       {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 3,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"median\",$", MR_Next},
                       {"\"iterations\": 3,$", MR_Next},
                       {"\"time_unit\": \"us\",?$"},
                       {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
                       {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 3,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"stddev\",$", MR_Next},
                       {"\"iterations\": 3,$", MR_Next},
                       {"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
                      {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
                      {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
                      {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
// ========================================================================= //
// -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= //

const auto UserStatistics = [](const std::vector<double> &v) { return v.back(); };
void BM_UserStats(benchmark::State &state)
{
    for (auto _ : state)
    {
        state.SetIterationTime(150 / 10e8);
    }
}
// clang-format off
BENCHMARK(BM_UserStats)
@ -633,82 +637,71 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ " {"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}}); "[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES( ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
TC_JSONOut, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"repetitions\": 3,$", MR_Next},
MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"iterations\": 5,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"iterations\": 5,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, {"\"repetitions\": 3,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"repetition_index\": 1,$", MR_Next},
MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"iterations\": 5,$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"threads\": 1,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"iterations\": 5,$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, {"\"repetition_index\": 2,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"threads\": 1,$", MR_Next},
MR_Next}, {"\"iterations\": 5,$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"repetition_index\": 2,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"iterations\": 5,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"}, {"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"iterations\": 3,$", MR_Next},
MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"repetitions\": 3,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, {"\"iterations\": 3,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, {"\"real_time\": %float,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next}, {"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
MR_Next}, {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"\"repetitions\": 3,$", MR_Next}, {"^\"BM_UserStats/iterations:5/repeats:3/"
{"\"threads\": 1,$", MR_Next}, "manual_time_median\",%csv_report$"},
{"\"aggregate_name\": \"\",$", MR_Next}, {"^\"BM_UserStats/iterations:5/repeats:3/"
{"\"iterations\": 3,$", MR_Next}, "manual_time_stddev\",%csv_report$"},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
@ -733,10 +726,12 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
// -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= //

void BM_CSV_Format(benchmark::State &state)
{
    state.SkipWithError("\"freedom\"");
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_CSV_Format);
ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
@ -745,4 +740,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char *argv[])
{
    RunOutputTests(argc, argv);
}
@ -3,109 +3,135 @@
#include <cassert>
#include <vector>

#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"

namespace
{

class TestReporter : public benchmark::ConsoleReporter
{
  public:
    virtual bool ReportContext(const Context &context)
    {
        return ConsoleReporter::ReportContext(context);
    };

    virtual void ReportRuns(const std::vector<Run> &report)
    {
        all_runs_.insert(all_runs_.end(), begin(report), end(report));
        ConsoleReporter::ReportRuns(report);
    }

    TestReporter()
    {
    }
    virtual ~TestReporter()
    {
    }

    mutable std::vector<Run> all_runs_;
};

struct TestCase
{
    std::string name;
    bool error_occurred;
    std::string error_message;

    typedef benchmark::BenchmarkReporter::Run Run;

    void CheckRun(Run const &run) const
    {
        CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name();
        CHECK(error_occurred == run.error_occurred);
        CHECK(error_message == run.error_message);

        if (error_occurred)
        {
            // CHECK(run.iterations == 0);
        }
        else
        {
            CHECK(run.iterations != 0);
        }
    }
};

std::vector<TestCase> ExpectedResults;

int AddCases(const char *base_name, std::initializer_list<TestCase> const &v)
{
    for (auto TC : v)
    {
        TC.name = base_name + TC.name;
        ExpectedResults.push_back(std::move(TC));
    }
    return 0;
}

#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)

} // end namespace

void BM_error_no_running(benchmark::State &state)
{
    state.SkipWithError("error message");
}
BENCHMARK(BM_error_no_running);
ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
void BM_error_before_running(benchmark::State &state)
{
    state.SkipWithError("error message");
    while (state.KeepRunning())
    {
        assert(false);
    }
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});

void BM_error_before_running_batch(benchmark::State &state)
{
    state.SkipWithError("error message");
    while (state.KeepRunningBatch(17))
    {
        assert(false);
    }
}
BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});

void BM_error_before_running_range_for(benchmark::State &state)
{
    state.SkipWithError("error message");
    for (auto _ : state)
    {
        assert(false);
    }
}
BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});

void BM_error_during_running(benchmark::State &state)
{
    int first_iter = true;
    while (state.KeepRunning())
    {
        if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
        {
            assert(first_iter);
            first_iter = false;
            state.SkipWithError("error message");
        }
        else
        {
            state.PauseTiming();
            state.ResumeTiming();
        }
    }
}
BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
@ -117,33 +143,37 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""}, {"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}}); {"/2/threads:8", false, ""}});
void BM_error_during_running_ranged_for(benchmark::State& state) { void BM_error_during_running_ranged_for(benchmark::State &state)
assert(state.max_iterations > 3 && "test requires at least a few iterations"); {
int first_iter = true; assert(state.max_iterations > 3 && "test requires at least a few iterations");
// NOTE: Users should not write the for loop explicitly. int first_iter = true;
for (auto It = state.begin(), End = state.end(); It != End; ++It) { // NOTE: Users should not write the for loop explicitly.
if (state.range(0) == 1) { for (auto It = state.begin(), End = state.end(); It != End; ++It)
assert(first_iter); {
first_iter = false; if (state.range(0) == 1)
state.SkipWithError("error message"); {
// Test the unfortunate but documented behavior that the ranged-for loop assert(first_iter);
// doesn't automatically terminate when SkipWithError is set. first_iter = false;
assert(++It != End); state.SkipWithError("error message");
break; // Required behavior // Test the unfortunate but documented behavior that the ranged-for loop
// doesn't automatically terminate when SkipWithError is set.
assert(++It != End);
break; // Required behavior
}
} }
}
} }
BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5); BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
ADD_CASES("BM_error_during_running_ranged_for", ADD_CASES("BM_error_during_running_ranged_for",
{{"/1/iterations:5", true, "error message"}, {{"/1/iterations:5", true, "error message"}, {"/2/iterations:5", false, ""}});
{"/2/iterations:5", false, ""}});
void BM_error_after_running(benchmark::State& state) { void BM_error_after_running(benchmark::State &state)
for (auto _ : state) { {
benchmark::DoNotOptimize(state.iterations()); for (auto _ : state)
} {
if (state.thread_index <= (state.threads / 2)) benchmark::DoNotOptimize(state.iterations());
state.SkipWithError("error message"); }
if (state.thread_index <= (state.threads / 2))
state.SkipWithError("error message");
} }
BENCHMARK(BM_error_after_running)->ThreadRange(1, 8); BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"}, ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
@ -151,19 +181,24 @@ ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
{"/threads:4", true, "error message"}, {"/threads:4", true, "error message"},
{"/threads:8", true, "error message"}}); {"/threads:8", true, "error message"}});
void BM_error_while_paused(benchmark::State& state) { void BM_error_while_paused(benchmark::State &state)
bool first_iter = true; {
while (state.KeepRunning()) { bool first_iter = true;
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { while (state.KeepRunning())
assert(first_iter); {
first_iter = false; if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
state.PauseTiming(); {
state.SkipWithError("error message"); assert(first_iter);
} else { first_iter = false;
state.PauseTiming(); state.PauseTiming();
state.ResumeTiming(); state.SkipWithError("error message");
}
else
{
state.PauseTiming();
state.ResumeTiming();
}
} }
}
} }
BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8); BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"}, ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
@ -175,21 +210,23 @@ ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""}, {"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}}); {"/2/threads:8", false, ""}});
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
benchmark::Initialize(&argc, argv); {
benchmark::Initialize(&argc, argv);
TestReporter test_reporter; TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter); benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run; typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin(); auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) { for (Run const &run : test_reporter.all_runs_)
assert(EB != ExpectedResults.end()); {
EB->CheckRun(run); assert(EB != ExpectedResults.end());
++EB; EB->CheckRun(run);
} ++EB;
assert(EB == ExpectedResults.end()); }
assert(EB == ExpectedResults.end());
return 0; return 0;
} }
@ -15,54 +15,58 @@ extern "C" {
using benchmark::State;

// CHECK-LABEL: test_for_auto_loop:
extern "C" int test_for_auto_loop()
{
    State &S = GetState();
    int x = 42;
    // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
    // CHECK-NEXT: testq %rbx, %rbx
    // CHECK-NEXT: je [[LOOP_END:.*]]
    for (auto _ : S)
    {
        // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
        // CHECK-GNU-NEXT: subq $1, %rbx
        // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
        // CHECK-NEXT: jne .L[[LOOP_HEAD]]
        benchmark::DoNotOptimize(x);
    }
    // CHECK: [[LOOP_END]]:
    // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv

    // CHECK: movl $101, %eax
    // CHECK: ret
    return 101;
}

// CHECK-LABEL: test_while_loop:
extern "C" int test_while_loop()
{
    State &S = GetState();
    int x = 42;

    // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
    // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
    while (S.KeepRunning())
    {
        // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
        // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
        // CHECK: movq %[[IREG]], [[DEST:.*]]
        benchmark::DoNotOptimize(x);
    }
    // CHECK-DAG: movq [[DEST]], %[[IREG]]
    // CHECK-DAG: testq %[[IREG]], %[[IREG]]
    // CHECK-DAG: jne .L[[LOOP_BODY]]
    // CHECK-DAG: .L[[LOOP_HEADER]]:

    // CHECK: cmpb $0
    // CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
    // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
    // CHECK: .L[[LOOP_END]]:
    // CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv

    // CHECK: movl $101, %eax
    // CHECK: ret
    return 101;
}
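// Aside: DoNotOptimize is the standard guard against the compiler deleting
// benchmarked work whose result is otherwise unused, which is exactly what
// the assembly checks above rely on. Typical use (sketch):
//
//     static void BM_Sum(benchmark::State &state)
//     {
//         int acc = 0;
//         for (auto _ : state)
//         {
//             acc += 1;
//             benchmark::DoNotOptimize(acc); // forces the value to be materialized
//         }
//     }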
@ -5,24 +5,27 @@
#include "../src/statistics.h"
#include "gtest/gtest.h"

namespace
{
TEST(StatisticsTest, Mean)
{
    EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
    EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
    EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
}

TEST(StatisticsTest, Median)
{
    EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
    EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
    EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
}

TEST(StatisticsTest, StdDev)
{
    EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
    EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
    EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), 1.151086443322134);
}
} // end namespace
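A detail these expectations pin down: StatisticsStdDev must be the sample (n - 1) standard deviation, since for {1, 2, 3} the mean is 2 and sqrt((1 + 0 + 1) / (3 - 1)) = 1.0. A hedged reference sketch of that formula (the name SampleStdDev is illustrative, not the library's internal):

#include <cmath>
#include <vector>

// Sample standard deviation: summed squared deviations divided by n - 1,
// which reproduces all three StdDev expectations above.
double SampleStdDev(const std::vector<double> &v)
{
    double mean = 0.0;
    for (double x : v)
        mean += x;
    mean /= v.size();
    double ss = 0.0;
    for (double x : v)
        ss += (x - mean) * (x - mean);
    return std::sqrt(ss / (v.size() - 1));
}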
View File
@ -2,152 +2,153 @@
// string_util_test - Unit tests for src/string_util.cc
//===---------------------------------------------------------------------===//

#include "../src/internal_macros.h"
#include "../src/string_util.h"
#include "gtest/gtest.h"

namespace
{
TEST(StringUtilTest, stoul)
{
    {
        size_t pos = 0;
        EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
        EXPECT_EQ(1ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
        EXPECT_EQ(1ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
        EXPECT_EQ(3ul, pos);
    }
#if ULONG_MAX == 0xFFFFFFFFul
    {
        size_t pos = 0;
        EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
        EXPECT_EQ(10ul, pos);
    }
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
    {
        size_t pos = 0;
        EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
        EXPECT_EQ(20ul, pos);
    }
#endif
    {
        size_t pos = 0;
        EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
        EXPECT_EQ(4ul, pos);
    }
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
    {
        ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
    }
#endif
}

TEST(StringUtilTest, stoi)
{
    {
        size_t pos = 0;
        EXPECT_EQ(0, benchmark::stoi("0", &pos));
        EXPECT_EQ(1ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
        EXPECT_EQ(3ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
        EXPECT_EQ(4ul, pos);
    }
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
    {
        ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
    }
#endif
}

TEST(StringUtilTest, stod)
{
    {
        size_t pos = 0;
        EXPECT_EQ(0.0, benchmark::stod("0", &pos));
        EXPECT_EQ(1ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
        EXPECT_EQ(3ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
        EXPECT_EQ(4ul, pos);
    }
    {
        size_t pos = 0;
        EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
        EXPECT_EQ(3ul, pos);
    }
    {
        size_t pos = 0;
        /* Note: exactly representable as double */
        EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
        EXPECT_EQ(8ul, pos);
    }
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
    {
        ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
    }
#endif
}
} // end namespace
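Since these helpers shadow the standard-library names, a quick usage sketch may help (hypothetical snippet; the contract mirrors std::stoul): the second argument receives the count of characters consumed and the third is the numeric base, so "1010" parses to 10, 520, 1010, or 4112 for bases 2, 8, 10, and 16, exactly as the cases above assert.

#include <cstddef>

#include "../src/string_util.h"

// Illustrative only: mirrors one of the cases exercised above.
void StoulUsageSketch()
{
    std::size_t pos = 0;
    unsigned long v = benchmark::stoul("1010", &pos, 8); // 1*512 + 1*8 == 520
    // pos == 4 because all four digits were consumed
    (void)v;
}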
View File
@ -4,24 +4,30 @@
#include <cassert>
#include <memory>

template <typename T> class MyFixture : public ::benchmark::Fixture
{
  public:
    MyFixture() : data(0)
    {
    }

    T data;
};

BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st)
{
    for (auto _ : st)
    {
        data += 1;
    }
}

BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State &st)
{
    for (auto _ : st)
    {
        data += 1.0;
    }
}
BENCHMARK_REGISTER_F(MyFixture, Bar);
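The pairing above is the point of this file: BENCHMARK_TEMPLATE_F defines and registers Foo in one step, while BENCHMARK_TEMPLATE_DEFINE_F only defines Bar, so the explicit BENCHMARK_REGISTER_F call is what makes it run. That call is also where run settings could be chained (a hypothetical variant, not in this diff):

// BENCHMARK_REGISTER_F yields the registration object, so the usual
// chained modifiers can be attached at this point.
BENCHMARK_REGISTER_F(MyFixture, Bar)->Threads(2)->Iterations(1000);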
View File
@ -7,11 +7,10 @@
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES(TC_ConsoleOut,
          {
              // keeping these lines long improves readability, so:
              // clang-format off
              {"^[-]+$", MR_Next},
              {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
              {"^[-]+$", MR_Next},

@ -46,8 +45,8 @@ ADD_CASES(

              {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
              {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
              {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
              // clang-format on
          });
ADD_CASES(TC_CSVOut, {{"%csv_header,"
                       "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});

@ -55,49 +54,51 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"

// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

void BM_Counters_Tabular(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    state.counters.insert({
        {"Foo", {1, bm::Counter::kAvgThreads}},
        {"Bar", {2, bm::Counter::kAvgThreads}},
        {"Baz", {4, bm::Counter::kAvgThreads}},
        {"Bat", {8, bm::Counter::kAvgThreads}},
        {"Frob", {16, bm::Counter::kAvgThreads}},
        {"Lob", {32, bm::Counter::kAvgThreads}},
    });
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
                       {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Bat\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float,$", MR_Next},
                       {"\"Frob\": %float,$", MR_Next},
                       {"\"Lob\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
                       "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const &e)
{
    CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
    CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
    CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
    CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
    CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
    CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);

@ -105,134 +106,138 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);

// -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= //

void BM_CounterRates_Tabular(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    namespace bm = benchmark;
    state.counters.insert({
        {"Foo", {1, bm::Counter::kAvgThreadsRate}},
        {"Bar", {2, bm::Counter::kAvgThreadsRate}},
        {"Baz", {4, bm::Counter::kAvgThreadsRate}},
        {"Bat", {8, bm::Counter::kAvgThreadsRate}},
        {"Frob", {16, bm::Counter::kAvgThreadsRate}},
        {"Lob", {32, bm::Counter::kAvgThreadsRate}},
    });
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
                       {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Bat\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float,$", MR_Next},
                       {"\"Frob\": %float,$", MR_Next},
                       {"\"Lob\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
                       "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const &e)
{
    double t = e.DurationCPUTime();
    CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", &CheckTabularRate);

// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    state.counters.insert({
        {"Foo", {10, bm::Counter::kAvgThreads}},
        {"Bar", {20, bm::Counter::kAvgThreads}},
        {"Baz", {40, bm::Counter::kAvgThreads}},
    });
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
                       {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const &e)
{
    CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
    CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
    CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);

// again.
void BM_CounterSet1_Tabular(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    state.counters.insert({
        {"Foo", {15, bm::Counter::kAvgThreads}},
        {"Bar", {25, bm::Counter::kAvgThreads}},
        {"Baz", {45, bm::Counter::kAvgThreads}},
    });
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
                       {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const &e)
{
    CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
    CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
    CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);

@ -241,40 +246,42 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);

// ========================================================================= //

// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    state.counters.insert({
        {"Foo", {10, bm::Counter::kAvgThreads}},
        {"Bat", {30, bm::Counter::kAvgThreads}},
        {"Baz", {40, bm::Counter::kAvgThreads}},
    });
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
                       {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bat\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
                       ",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const &e)
{
    CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
    CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
    CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);

@ -282,4 +289,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);

// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char *argv[])
{
    RunOutputTests(argc, argv);
}
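A note on the semantics these checks pin down (an inference from the expectations, not new behaviour): counters flagged bm::Counter::kAvgThreads are averaged over the worker threads when reported. At threads:16, each of the 16 threads sets Foo to 1; the accumulated total of 16 is divided back by 16, so CheckTabular can expect the per-thread values 1, 2, 4, 8, 16 and 32 unchanged at every point of ThreadRange(1, 16).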
View File
@ -22,15 +22,16 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //

void BM_Counters_Simple(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    state.counters["foo"] = 1;
    state.counters["bar"] = 2 * (double)state.iterations();
}
BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
                       {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},

@ -47,11 +48,12 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},

ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const &e)
{
    double its = e.NumIterations();
    CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
    // check that the value of bar is within 0.1% of the expected value
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
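Worth spelling out why CheckSimple holds (a reading of the test): assigning a plain number to state.counters creates a counter with default flags, which is reported verbatim. foo is therefore exactly 1, while bar is written as 2 * iterations after the loop, so dividing the reported value by the iteration count recovers the factor of 2 that CHECK_FLOAT_COUNTER_VALUE verifies to within 0.1%.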
@ -59,71 +61,73 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //

namespace
{
int num_calls1 = 0;
}

void BM_Counters_WithBytesAndItemsPSec(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    state.counters["foo"] = 1;
    state.counters["bar"] = ++num_calls1;
    state.SetBytesProcessed(364);
    state.SetItemsProcessed(150);
}
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
                           "bar=%hrfloat bytes_per_second=%hrfloat/s "
                           "foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
                       {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"bytes_per_second\": %float,$", MR_Next},
                       {"\"foo\": %float,$", MR_Next},
                       {"\"items_per_second\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
                       "%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const &e)
{
    double t = e.DurationCPUTime(); // this (and not real time) is the time used
    CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
    CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
    // check that the values are within 0.1% of the expected values
    CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
    CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", &CheckBytesAndItemsPSec);

// ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= //

void BM_Counters_Rate(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    namespace bm = benchmark;
    state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
    state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
BENCHMARK(BM_Counters_Rate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
                       {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},

@ -140,11 +144,12 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},

ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const &e)
{
    double t = e.DurationCPUTime(); // this (and not real time) is the time used
    // check that the values are within 0.1% of the expected values
    CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
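The rate math encoded above, made explicit (inferred from the checks): a counter flagged kIsRate is divided by the measured CPU time before reporting, so with foo = 1 and bar = 2 over a run of t CPU seconds the expected outputs are 1/t and 2/t per second. That is both the /s suffix in the console pattern and the 1. / t and 2. / t terms in CheckRate.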
@ -152,18 +157,19 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ----------------------- Inverted Counters Output ------------------------ //
// ========================================================================= //

void BM_Invert(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    namespace bm = benchmark;
    state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
    state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
}
BENCHMARK(BM_Invert);
ADD_CASES(TC_ConsoleOut, {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
                       {"\"run_name\": \"BM_Invert\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},

@ -180,9 +186,10 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},

ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvert(Results const &e)
{
    CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
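Decoding the console patterns above (an inference from the expectations): kInvert reports 1/value, so foo = 0.0001 comes out as 10000 and bar = 10000 as 0.0001, which is exactly what CheckInvert asserts. The human-readable formatter then appears to apply SI suffixes, which would explain why the regex expects bar=%hrfloatu (micro) and foo=%hrfloatk (kilo).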
@ -191,43 +198,42 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
// -------------------------- //
// ========================================================================= //

void BM_Counters_InvertedRate(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    namespace bm = benchmark;
    state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
    state.counters["bar"] = bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
}
BENCHMARK(BM_Counters_InvertedRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
                           "bar=%hrfloats foo=%hrfloats$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_InvertedRate\",$"},
                       {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvertedRate(Results const &e)
{
    double t = e.DurationCPUTime(); // this (and not real time) is the time used
    // check that the values are within 0.1% of the expected values
    CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
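Combining the two flags (again inferred from CheckInvertedRate): kIsRate | kInvert reports t / value, i.e. seconds per counted unit. With foo = 1 the report is t itself, and with bar = 8192 it is t / 8192, which matches the trailing s in the %hrfloats console pattern.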
@ -235,37 +241,37 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //

void BM_Counters_Threads(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    state.counters["foo"] = 1;
    state.counters["bar"] = 2;
}
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
                       {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const &e)
{
    CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
    CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
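For contrast with the kAvgThreads cases (a reading of the expectations): counters without a thread-averaging flag are accumulated across threads, so a per-thread foo = 1 reports as NumThreads() and bar = 2 as 2 * NumThreads(), exactly what CheckThreads asserts.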
@ -273,209 +279,207 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ---------------------- ThreadAvg Counters Output ------------------------ // // ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) { void BM_Counters_AvgThreads(benchmark::State &state)
for (auto _ : state) { {
} for (auto _ : state)
namespace bm = benchmark; {
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads}; }
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads}; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
} }
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}}); "%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next},
{"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next},
{"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"}", MR_Next}});
{"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) { void CheckAvgThreads(Results const &e)
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); {
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2); CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", &CheckAvgThreads);
&CheckAvgThreads);
// ========================================================================= // // ========================================================================= //
// ---------------------- ThreadAvg Counters Output ------------------------ // // ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) { void BM_Counters_AvgThreadsRate(benchmark::State &state)
for (auto _ : state) { {
// This test requires a non-zero CPU time to avoid divide-by-zero for (auto _ : state)
benchmark::DoNotOptimize(state.iterations()); {
} // This test requires a non-zero CPU time to avoid divide-by-zero
namespace bm = benchmark; benchmark::DoNotOptimize(state.iterations());
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; }
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate}; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
} }
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", {"\"run_type\": \"iteration\",$", MR_Next},
MR_Next}, {"\"repetitions\": 0,$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"iterations\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"real_time\": %float,$", MR_Next},
{"\"iterations\": %int,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next},
{"\"real_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"\"bar\": %float,$", MR_Next}, {"}", MR_Next}});
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/" ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
"threads:%int\",%csv_report,%float,%float$"}}); "threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) { void CheckAvgThreadsRate(Results const &e)
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001); {
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", &CheckAvgThreadsRate);
&CheckAvgThreadsRate);
// ========================================================================= // // ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ // // ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_IterationInvariant(benchmark::State& state) { void BM_Counters_IterationInvariant(benchmark::State &state)
for (auto _ : state) { {
} for (auto _ : state)
namespace bm = benchmark; {
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant}; }
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant}; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
} }
BENCHMARK(BM_Counters_IterationInvariant); BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}}); "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{{"\"name\": \"BM_Counters_IterationInvariant\",$"}, {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next},
{"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next},
{"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"}", MR_Next}});
{"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) { void CheckIterationInvariant(Results const &e)
double its = e.NumIterations(); {
// check that the values are within 0.1% of the expected value double its = e.NumIterations();
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001); // check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", &CheckIterationInvariant);
&CheckIterationInvariant);
// ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= //

void BM_Counters_kIsIterationInvariantRate(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    namespace bm = benchmark;
    state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
    state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
                       {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
                       "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const &e)
{
    double its = e.NumIterations();
    double t = e.DurationCPUTime(); // this (and not real time) is the time used
    // check that the values are within 0.1% of the expected values
    CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", &CheckIsIterationInvariantRate);
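// A sketch of why "foo" and "bar" above scale identically: to our
// understanding the composite flag is the bitwise OR of the two simpler
// flags (the test relies on exactly this). Casting to int keeps the
// comparison a plain integral constant expression.
static_assert((int)benchmark::Counter::kIsIterationInvariantRate ==
                  ((int)benchmark::Counter::kIsRate | (int)benchmark::Counter::kIsIterationInvariant),
              "kIsIterationInvariantRate == kIsRate | kIsIterationInvariant");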
// ========================================================================= //
// ------------------- AvgIterations Counters Output ------------------ //
// ========================================================================= //

void BM_Counters_AvgIterations(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
    state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
                       {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const &e)
{
    double its = e.NumIterations();
    // check that the values are within 0.1% of the expected value
    CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
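// A minimal sketch (benchmark and counter names are ours): kAvgIterations
// divides the stored value by the iteration count, so a total accumulated
// across the loop is reported as a per-iteration average. Assumes
// benchmark.h (and thus <cstdint>) is already included, as in this file.
void BM_Sketch_AvgAllocs(benchmark::State &state)
{
    int64_t total = 0;
    for (auto _ : state)
    {
        total += 3; // pretend each iteration performs three allocations
    }
    // Reported value: total / state.iterations() == 3.
    state.counters["allocs_per_iter"] = benchmark::Counter(static_cast<double>(total), benchmark::Counter::kAvgIterations);
}
BENCHMARK(BM_Sketch_AvgAllocs);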
@@ -483,49 +487,52 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);

// ----------------- AvgIterationsRate Counters Output ---------------- //
// ========================================================================= //

void BM_Counters_kAvgIterationsRate(benchmark::State &state)
{
    for (auto _ : state)
    {
        // This test requires a non-zero CPU time to avoid divide-by-zero
        benchmark::DoNotOptimize(state.iterations());
    }
    namespace bm = benchmark;
    state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
    state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
                       {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
                       "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const &e)
{
    double its = e.NumIterations();
    double t = e.DurationCPUTime(); // this (and not real time) is the time used
    // check that the values are within 0.1% of the expected values
    CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
    CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", &CheckAvgIterationsRate);
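// The expected-value arithmetic used by the four Check* functions above,
// collected into one helper (our own summary for illustration, not library
// code):
double ExpectedCounterValue(double v, double iterations, double cpu_seconds, benchmark::Counter::Flags flags)
{
    if (flags & benchmark::Counter::kIsIterationInvariant)
        v *= iterations; // e.g. foo == its in CheckIterationInvariant
    if (flags & benchmark::Counter::kAvgIterations)
        v /= iterations; // e.g. foo == 1. / its in CheckAvgIterations
    if (flags & benchmark::Counter::kIsRate)
        v /= cpu_seconds; // rates always use CPU time, not real time
    return v;
}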
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char *argv[])
{
    RunOutputTests(argc, argv);
}
@@ -8,161 +8,149 @@

// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //

void BM_Counters_Thousands(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    state.counters.insert({
        {"t0_1000000DefaultBase", bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
        {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
        {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
        {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
        {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
    });
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
ADD_CASES(TC_ConsoleOut, {
                             {"^BM_Counters_Thousands/repeats:2 %console_report "
                              "t0_1000000DefaultBase=1000k "
                              "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
                              "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
                             {"^BM_Counters_Thousands/repeats:2 %console_report "
                              "t0_1000000DefaultBase=1000k "
                              "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
                              "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
                             {"^BM_Counters_Thousands/repeats:2_mean %console_report "
                              "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
                              "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
                              "t4_1048576Base1024=1024k$"},
                             {"^BM_Counters_Thousands/repeats:2_median %console_report "
                              "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
                              "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
                              "t4_1048576Base1024=1024k$"},
                             {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
                              "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
                              "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
                         });
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
                       {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
                       {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"repetition_index\": 1,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
                       {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"mean\",$", MR_Next},
                       {"\"iterations\": 2,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
                       {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"median\",$", MR_Next},
                       {"\"iterations\": 2,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
                       {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
                       {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"aggregate_name\": \"stddev\",$", MR_Next},
                       {"\"iterations\": 2,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
                       {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
                       {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
                       {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
                       {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Thousands/"
                       "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
                       "0)*6,1\\.04858e\\+(0)*6$"},
                      {"^\"BM_Counters_Thousands/"
                       "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
                       "0)*6,1\\.04858e\\+(0)*6$"},
                      {"^\"BM_Counters_Thousands/"
                       "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
                       "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
                      {"^\"BM_Counters_Thousands/"
                       "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
                       "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
                      {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const &e)
{
    if (e.name != "BM_Counters_Thousands/repeats:2")
        return; // Do not check the aggregates!

    // check that the values are within 0.01% of the expected values
    CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, 0.0001);
    CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
    CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
    CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
    CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
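// Why 976.56[23]k and 1048.58k appear in the expectations above (a worked
// example of ours): the human-readable formatter divides by the counter's
// OneK base before appending "k".
#include <cstdio>
void PrintThousandsBases()
{
    std::printf("%.3fk\n", 1000000.0 / 1000.0); // base 1000 -> 1000.000k
    std::printf("%.3fk\n", 1000000.0 / 1024.0); // base 1024 -> 976.562k or 976.563k, rounding-dependent
    std::printf("%.2fk\n", 1048576.0 / 1000.0); // base 1000 -> 1048.58k
    std::printf("%.0fk\n", 1048576.0 / 1024.0); // base 1024 -> 1024k
}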
@@ -170,4 +158,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);

// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char *argv[])
{
    RunOutputTests(argc, argv);
}
@@ -40,7 +40,8 @@

#include "gtest/internal/gtest-death-test-internal.h"

namespace testing
{

// This flag controls the style of death tests. Valid values are "threadsafe",
// meaning that the death test child process will re-execute the test binary

@@ -51,7 +52,8 @@ GTEST_DECLARE_string_(death_test_style);

#if GTEST_HAS_DEATH_TEST

namespace internal
{

// Returns a Boolean value indicating whether the caller is currently
// executing in the context of the death test child process. Tools such as

@@ -60,7 +62,7 @@ namespace internal {

// implementation of death tests. User code MUST NOT use it.
GTEST_API_ bool InDeathTestChild();

} // namespace internal

// The following macros are useful for writing death tests.

@@ -165,51 +167,51 @@ GTEST_API_ bool InDeathTestChild();

// Asserts that a given statement causes the program to exit, with an
// integer exit status that satisfies predicate, and emitting error output
// that matches regex.
#define ASSERT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)

// Like ASSERT_EXIT, but continues on to successive tests in the
// test suite, if any:
#define EXPECT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)

// Asserts that a given statement causes the program to exit, either by
// explicitly exiting with a nonzero exit code or being killed by a
// signal, and emitting error output that matches regex.
#define ASSERT_DEATH(statement, regex) ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)

// Like ASSERT_DEATH, but continues on to successive tests in the
// test suite, if any:
#define EXPECT_DEATH(statement, regex) EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)

// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:

// Tests that an exit code describes a normal exit with a given exit code.
class GTEST_API_ ExitedWithCode
{
  public:
    explicit ExitedWithCode(int exit_code);
    bool operator()(int exit_status) const;

  private:
    // No implementation - assignment is unsupported.
    void operator=(const ExitedWithCode &other);

    const int exit_code_;
};

#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
// Tests that an exit code describes an exit due to termination by a
// given signal.
// GOOGLETEST_CM0006 DO NOT DELETE
class GTEST_API_ KilledBySignal
{
  public:
    explicit KilledBySignal(int signum);
    bool operator()(int exit_status) const;

  private:
    const int signum_;
};
#endif // !GTEST_OS_WINDOWS
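// A usage sketch for the two predicates above (the test body is ours and
// belongs in a user's test .cc, with <cstdlib> and <csignal> included, not
// in this header):
TEST(SketchDeathTest, ExitCodesAndSignals)
{
    // Child exits normally with status 1; stderr may be empty, so match "".
    EXPECT_EXIT(std::exit(1), ::testing::ExitedWithCode(1), "");
#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
    // Child is killed by a signal instead of exiting.
    EXPECT_EXIT(raise(SIGKILL), ::testing::KilledBySignal(SIGKILL), "");
#endif
}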
// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
// The death testing framework causes this to have interesting semantics,

@@ -254,24 +256,20 @@ class GTEST_API_ KilledBySignal {

//   EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
// }, "death");
//
#ifdef NDEBUG

#define EXPECT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)

#define ASSERT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)

#else

#define EXPECT_DEBUG_DEATH(statement, regex) EXPECT_DEATH(statement, regex)

#define ASSERT_DEBUG_DEATH(statement, regex) ASSERT_DEATH(statement, regex)

#endif // NDEBUG for EXPECT_DEBUG_DEATH

#endif // GTEST_HAS_DEATH_TEST

// This macro is used for implementing macros such as
// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where

@@ -308,18 +306,21 @@ class GTEST_API_ KilledBySignal {

// statement unconditionally returns or throws. The Message constructor at
// the end allows the syntax of streaming additional messages into the
// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH.
#define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (::testing::internal::AlwaysTrue()) \
    { \
        GTEST_LOG_(WARNING) << "Death tests are not supported on this platform.\n" \
                            << "Statement '" #statement "' cannot be verified."; \
    } \
    else if (::testing::internal::AlwaysFalse()) \
    { \
        ::testing::internal::RE::PartialMatch(".*", (regex)); \
        GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
        terminator; \
    } \
    else \
        ::testing::Message()
// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if

@@ -327,17 +328,13 @@ class GTEST_API_ KilledBySignal {

// useful when you are combining death test assertions with normal test
// assertions in one test.
#if GTEST_HAS_DEATH_TEST
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) EXPECT_DEATH(statement, regex)
#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) ASSERT_DEATH(statement, regex)
#else
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return )
#endif

} // namespace testing

#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
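// Usage sketch for the *_IF_SUPPORTED forms above (our example, for a user's
// test file with <cstdlib> included): on platforms without death-test
// support the macro only logs a warning instead of failing, so the same test
// source builds everywhere.
TEST(SketchDeathTest, PortableAbort)
{
    EXPECT_DEATH_IF_SUPPORTED(std::abort(), "");
}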
@@ -52,14 +52,14 @@

#include "gtest/internal/gtest-port.h"

GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)

// Ensures that there is at least one operator<< in the global namespace.
// See Message& operator<<(...) below for why.
void operator<<(const testing::internal::Secret &, int);

namespace testing
{

// The Message class works like an ostream repeater.
//

@@ -87,132 +87,142 @@ namespace testing {

// latter (it causes an access violation if you do). The Message
// class hides this difference by treating a NULL char pointer as
// "(null)".
class GTEST_API_ Message
{
  private:
    // The type of basic IO manipulators (endl, ends, and flush) for
    // narrow streams.
    typedef std::ostream &(*BasicNarrowIoManip)(std::ostream &);

  public:
    // Constructs an empty Message.
    Message();

    // Copy constructor.
    Message(const Message &msg) : ss_(new ::std::stringstream)
    { // NOLINT
        *ss_ << msg.GetString();
    }

    // Constructs a Message from a C-string.
    explicit Message(const char *str) : ss_(new ::std::stringstream)
    {
        *ss_ << str;
    }

    // Streams a non-pointer value to this object.
    template <typename T> inline Message &operator<<(const T &val)
    {
        // Some libraries overload << for STL containers. These
        // overloads are defined in the global namespace instead of ::std.
        //
        // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
        // overloads are visible in either the std namespace or the global
        // namespace, but not other namespaces, including the testing
        // namespace which Google Test's Message class is in.
        //
        // To allow STL containers (and other types that has a << operator
        // defined in the global namespace) to be used in Google Test
        // assertions, testing::Message must access the custom << operator
        // from the global namespace. With this using declaration,
        // overloads of << defined in the global namespace and those
        // visible via Koenig lookup are both exposed in this function.
        using ::operator<<;
        *ss_ << val;
        return *this;
    }

    // Streams a pointer value to this object.
    //
    // This function is an overload of the previous one. When you
    // stream a pointer to a Message, this definition will be used as it
    // is more specialized. (The C++ Standard, section
    // [temp.func.order].) If you stream a non-pointer, then the
    // previous definition will be used.
    //
    // The reason for this overload is that streaming a NULL pointer to
    // ostream is undefined behavior. Depending on the compiler, you
    // may get "0", "(nil)", "(null)", or an access violation. To
    // ensure consistent result across compilers, we always treat NULL
    // as "(null)".
    template <typename T> inline Message &operator<<(T *const &pointer)
    { // NOLINT
        if (pointer == nullptr)
        {
            *ss_ << "(null)";
        }
        else
        {
            *ss_ << pointer;
        }
        return *this;
    }

    // Since the basic IO manipulators are overloaded for both narrow
    // and wide streams, we have to provide this specialized definition
    // of operator <<, even though its body is the same as the
    // templatized version above. Without this definition, streaming
    // endl or other basic IO manipulators to Message will confuse the
    // compiler.
    Message &operator<<(BasicNarrowIoManip val)
    {
        *ss_ << val;
        return *this;
    }

    // Instead of 1/0, we want to see true/false for bool values.
    Message &operator<<(bool b)
    {
        return *this << (b ? "true" : "false");
    }

    // These two overloads allow streaming a wide C string to a Message
    // using the UTF-8 encoding.
    Message &operator<<(const wchar_t *wide_c_str);
    Message &operator<<(wchar_t *wide_c_str);

#if GTEST_HAS_STD_WSTRING
    // Converts the given wide string to a narrow string using the UTF-8
    // encoding, and streams the result to this Message object.
    Message &operator<<(const ::std::wstring &wstr);
#endif // GTEST_HAS_STD_WSTRING

    // Gets the text streamed to this object so far as an std::string.
    // Each '\0' character in the buffer is replaced with "\\0".
    //
    // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
    std::string GetString() const;

  private:
    // We'll hold the text streamed to this object here.
    const std::unique_ptr<::std::stringstream> ss_;

    // We declare (but don't implement) this to prevent the compiler
    // from implementing the assignment operator.
    void operator=(const Message &);
};

// Streams a Message to an ostream.
inline std::ostream &operator<<(std::ostream &os, const Message &sb)
{
    return os << sb.GetString();
}
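// A short usage sketch for Message (function name is ours): values are
// accumulated through the operator<< overloads above and retrieved with
// GetString(); note that bool prints as "true"/"false" and a null pointer
// as "(null)".
inline std::string MessageSketch()
{
    const char *missing = nullptr;
    return (Message() << "flag=" << true << ", ptr=" << missing).GetString();
    // -> "flag=true, ptr=(null)"
}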
namespace internal
{

// Converts a streamable value to an std::string. A NULL pointer is
// converted to "(null)". When the input value is a ::string,
// ::std::string, ::wstring, or ::std::wstring object, each NUL
// character in it is replaced with "\\0".
template <typename T> std::string StreamableToString(const T &streamable)
{
    return (Message() << streamable).GetString();
}

} // namespace internal
} // namespace testing

GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251

#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
@@ -36,7 +36,6 @@

#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_

// Value-parameterized tests allow you to test your code with different
// parameters without writing multiple copies of the same test.
//

@@ -172,7 +171,7 @@ TEST_P(DerivedTest, DoesBlah) {

  EXPECT_TRUE(foo.Blah(GetParam()));
}
#endif // 0

#include <iterator>
#include <utility>

@@ -181,7 +180,8 @@

#include "gtest/internal/gtest-param-util.h"
#include "gtest/internal/gtest-port.h"

namespace testing
{

// Functions producing parameter generators.
//

@@ -225,15 +225,14 @@ namespace testing {

// * Condition start < end must be satisfied in order for resulting sequences
// to contain any elements.
//
template <typename T, typename IncrementT> internal::ParamGenerator<T> Range(T start, T end, IncrementT step)
{
    return internal::ParamGenerator<T>(new internal::RangeGenerator<T, IncrementT>(start, end, step));
}

template <typename T> internal::ParamGenerator<T> Range(T start, T end)
{
    return Range(start, end, 1);
}
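// Usage sketch for Range() (our example, for a user's test file; suite name
// is hypothetical): Range(0, 10, 2) yields 0, 2, 4, 6, 8 -- the end value is
// excluded -- and the two-argument overload above supplies step = 1.
class RangeSketchTest : public testing::TestWithParam<int>
{
};
TEST_P(RangeSketchTest, ParamIsEven)
{
    EXPECT_EQ(0, GetParam() % 2);
}
INSTANTIATE_TEST_SUITE_P(Evens, RangeSketchTest, testing::Range(0, 10, 2));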
// ValuesIn() function allows generation of tests with parameters coming from

@@ -292,23 +291,21 @@ internal::ParamGenerator<T> Range(T start, T end) {

//  ValuesIn(l.begin(), l.end()));
//
template <typename ForwardIterator>
internal::ParamGenerator<typename std::iterator_traits<ForwardIterator>::value_type> ValuesIn(ForwardIterator begin,
                                                                                              ForwardIterator end)
{
    typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType;
    return internal::ParamGenerator<ParamType>(new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
}

template <typename T, size_t N> internal::ParamGenerator<T> ValuesIn(const T (&array)[N])
{
    return ValuesIn(array, array + N);
}

template <class Container> internal::ParamGenerator<typename Container::value_type> ValuesIn(const Container &container)
{
    return ValuesIn(container.begin(), container.end());
}
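// Usage sketch for the ValuesIn() overloads above (our example, for a user's
// test file; names are hypothetical): a C array binds to the (&array)[N]
// overload, a container to the Container overload, and an iterator pair to
// the first overload.
const char *kSketchNames[] = {"alpha", "beta", "gamma"};
class ValuesInSketchTest : public testing::TestWithParam<const char *>
{
};
TEST_P(ValuesInSketchTest, NameIsNonNull)
{
    EXPECT_NE(nullptr, GetParam());
}
INSTANTIATE_TEST_SUITE_P(FromArray, ValuesInSketchTest, testing::ValuesIn(kSketchNames));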
// Values() allows generating tests from explicitly specified list of

@@ -331,9 +328,9 @@ internal::ParamGenerator<typename Container::value_type> ValuesIn(

//   INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
//
//

template <typename... T> internal::ValueArray<T...> Values(T... v)
{
    return internal::ValueArray<T...>(std::move(v)...);
}
// Bool() allows generating tests with parameters in a set of (false, true).

@@ -356,8 +353,9 @@ internal::ValueArray<T...> Values(T... v) {

// }
//   INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool());
//

inline internal::ParamGenerator<bool> Bool()
{
    return Values(false, true);
}
// Combine() allows the user to combine two or more sequences to produce

@@ -406,39 +404,38 @@ inline internal::ParamGenerator<bool> Bool() {

//   INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest,
//                            Combine(Bool(), Bool()));
//

template <typename... Generator> internal::CartesianProductHolder<Generator...> Combine(const Generator &...g)
{
    return internal::CartesianProductHolder<Generator...>(g...);
}
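// Usage sketch for Bool() and Combine() (our example, for a user's test file
// with <tuple> available): Combine(Bool(), Values(1, 2, 3)) produces the
// six-element cartesian product as std::tuple<bool, int> parameters.
class ComboSketchTest : public testing::TestWithParam<std::tuple<bool, int>>
{
};
TEST_P(ComboSketchTest, IntParamInRange)
{
    EXPECT_GE(std::get<1>(GetParam()), 1);
}
INSTANTIATE_TEST_SUITE_P(AllPairs, ComboSketchTest, testing::Combine(testing::Bool(), testing::Values(1, 2, 3)));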
#define TEST_P(test_suite_name, test_name) \
    class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) : public test_suite_name \
    { \
      public: \
        GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() \
        { \
        } \
        virtual void TestBody(); \
 \
      private: \
        static int AddToRegistry() \
        { \
            ::testing::UnitTest::GetInstance() \
                ->parameterized_test_registry() \
                .GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
                                                            ::testing::internal::CodeLocation(__FILE__, __LINE__)) \
                ->AddTestPattern( \
                    GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \
                    new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)>()); \
            return 0; \
        } \
        static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
        GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)); \
    }; \
    int GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::gtest_registering_dummy_ = \
        GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \
    void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
// The last argument to INSTANTIATE_TEST_SUITE_P allows the user to specify
// generator and an optional function or functor that generates custom test name

@@ -457,47 +454,40 @@ internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {

#define GTEST_GET_FIRST_(first, ...) first
#define GTEST_GET_SECOND_(first, second, ...) second
#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \
    static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
        gtest_##prefix##test_suite_name##_EvalGenerator_() \
    { \
        return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \
    } \
    static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \
        const ::testing::TestParamInfo<test_suite_name::ParamType> &info) \
    { \
        if (::testing::internal::AlwaysFalse()) \
        { \
            ::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \
                __VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))); \
            auto t = std::make_tuple(__VA_ARGS__); \
            static_assert(std::tuple_size<decltype(t)>::value <= 2, "Too Many Args!"); \
        } \
        return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \
            __VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))))(info); \
    } \
    static int gtest_##prefix##test_suite_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
        ::testing::UnitTest::GetInstance() \
            ->parameterized_test_registry() \
            .GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
                                                        ::testing::internal::CodeLocation(__FILE__, __LINE__)) \
            ->AddTestSuiteInstantiation(#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \
                                        &gtest_##prefix##test_suite_name##_EvalGenerateName_, __FILE__, __LINE__)
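// A sketch of the optional name-generator argument handled by
// GTEST_GET_SECOND_ above, reusing the hypothetical ValuesInSketchTest suite
// from our earlier sketch: the functor maps TestParamInfo to the test-name
// suffix, so each test is named after its parameter.
INSTANTIATE_TEST_SUITE_P(Named, ValuesInSketchTest, testing::ValuesIn(kSketchNames),
                         [](const testing::TestParamInfo<const char *> &info) { return std::string(info.param); });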
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TEST_CASE_P \
    static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), ""); \
    INSTANTIATE_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_

} // namespace testing

#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
@@ -38,10 +38,10 @@

#include "gtest/gtest.h"

GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)

namespace testing
{

// This helper class can be used to mock out Google Test failure reporting
// so that we can test Google Test or code that builds on Google Test.

@@ -52,71 +52,73 @@ namespace testing {

// generated in the same thread that created this object or it can intercept
// all generated failures. The scope of this mock object can be controlled with
// the second argument to the two arguments constructor.
class GTEST_API_ ScopedFakeTestPartResultReporter class GTEST_API_ ScopedFakeTestPartResultReporter : public TestPartResultReporterInterface
: public TestPartResultReporterInterface { {
public: public:
// The two possible mocking modes of this object. // The two possible mocking modes of this object.
enum InterceptMode { enum InterceptMode
INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. {
INTERCEPT_ALL_THREADS // Intercepts all failures. INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
}; INTERCEPT_ALL_THREADS // Intercepts all failures.
};
// The c'tor sets this object as the test part result reporter used // The c'tor sets this object as the test part result reporter used
// by Google Test. The 'result' parameter specifies where to report the // by Google Test. The 'result' parameter specifies where to report the
// results. This reporter will only catch failures generated in the current // results. This reporter will only catch failures generated in the current
// thread. DEPRECATED // thread. DEPRECATED
explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); explicit ScopedFakeTestPartResultReporter(TestPartResultArray *result);
// Same as above, but you can choose the interception scope of this object. // Same as above, but you can choose the interception scope of this object.
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, TestPartResultArray *result);
TestPartResultArray* result);
// The d'tor restores the previous test part result reporter. // The d'tor restores the previous test part result reporter.
~ScopedFakeTestPartResultReporter() override; ~ScopedFakeTestPartResultReporter() override;
// Appends the TestPartResult object to the TestPartResultArray // Appends the TestPartResult object to the TestPartResultArray
// received in the constructor. // received in the constructor.
// //
// This method is from the TestPartResultReporterInterface // This method is from the TestPartResultReporterInterface
// interface. // interface.
void ReportTestPartResult(const TestPartResult& result) override; void ReportTestPartResult(const TestPartResult &result) override;
private: private:
void Init(); void Init();
const InterceptMode intercept_mode_; const InterceptMode intercept_mode_;
TestPartResultReporterInterface* old_reporter_; TestPartResultReporterInterface *old_reporter_;
TestPartResultArray* const result_; TestPartResultArray *const result_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
}; };
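For orientation, a minimal usage sketch (hypothetical test code, not part of this header): the reporter captures failures raised in the current thread instead of reporting them, and restores the previous reporter when it goes out of scope.

TEST(SpiSketch, InterceptsCurrentThread)
{
    ::testing::TestPartResultArray results;
    {
        ::testing::ScopedFakeTestPartResultReporter reporter(
            ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &results);
        ADD_FAILURE() << "captured, not reported"; // lands in `results`
    } // previous reporter restored here
    EXPECT_EQ(1, results.size());
}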
namespace internal
{

// A helper class for implementing EXPECT_FATAL_FAILURE() and
// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
// TestPartResultArray contains exactly one failure that has the given
// type and contains the given substring. If that's not the case, a
// non-fatal failure will be generated.
class GTEST_API_ SingleFailureChecker
{
  public:
    // The constructor remembers the arguments.
    SingleFailureChecker(const TestPartResultArray *results, TestPartResult::Type type, const std::string &substr);
    ~SingleFailureChecker();

  private:
    const TestPartResultArray *const results_;
    const TestPartResult::Type type_;
    const std::string substr_;

    GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
};

} // namespace internal
} // namespace testing

GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// A set of macros for testing Google Test assertions or code that's expected
// to generate Google Test fatal failures. It verifies that the given
@@ -141,39 +143,47 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// helper macro, due to some peculiarity in how the preprocessor
// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
// gtest_unittest.cc will fail to compile if we do that.
#define EXPECT_FATAL_FAILURE(statement, substr) \
    do \
    { \
        class GTestExpectFatalFailureHelper \
        { \
          public: \
            static void Execute() \
            { \
                statement; \
            } \
        }; \
        ::testing::TestPartResultArray gtest_failures; \
        ::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
                                                                ::testing::TestPartResult::kFatalFailure, (substr)); \
        { \
            ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
                ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
            GTestExpectFatalFailureHelper::Execute(); \
        } \
    } while (::testing::internal::AlwaysFalse())
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
    do \
    { \
        class GTestExpectFatalFailureHelper \
        { \
          public: \
            static void Execute() \
            { \
                statement; \
            } \
        }; \
        ::testing::TestPartResultArray gtest_failures; \
        ::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
                                                                ::testing::TestPartResult::kFatalFailure, (substr)); \
        { \
            ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
                ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
            GTestExpectFatalFailureHelper::Execute(); \
        } \
    } while (::testing::internal::AlwaysFalse())
// A macro for testing Google Test assertions or code that's expected to
// generate Google Test non-fatal failures. It asserts that the given
@@ -207,32 +217,36 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// instead of
// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
// to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
    do \
    { \
        ::testing::TestPartResultArray gtest_failures; \
        ::testing::internal::SingleFailureChecker gtest_checker( \
            &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
        { \
            ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
                ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
            if (::testing::internal::AlwaysTrue()) \
            { \
                statement; \
            } \
        } \
    } while (::testing::internal::AlwaysFalse())
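And the non-fatal counterpart, again as a hypothetical sketch; ADD_FAILURE() produces a non-fatal failure, so it pairs with this macro:

TEST(SpiSketch, CatchesNonfatalFailure)
{
    EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "expected nonfatal", "expected nonfatal");
}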
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
    do \
    { \
        ::testing::TestPartResultArray gtest_failures; \
        ::testing::internal::SingleFailureChecker gtest_checker( \
            &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
        { \
            ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
                ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
            if (::testing::internal::AlwaysTrue()) \
            { \
                statement; \
            } \
        } \
    } while (::testing::internal::AlwaysFalse())
#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
@@ -32,128 +32,163 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_

#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-string.h"

#include <iosfwd>
#include <vector>

GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)

namespace testing
{

// A copyable object representing the result of a test part (i.e. an
// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()).
//
// Don't inherit from TestPartResult as its destructor is not virtual.
class GTEST_API_ TestPartResult
{
  public:
    // The possible outcomes of a test part (i.e. an assertion or an
    // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
    enum Type
    {
        kSuccess,         // Succeeded.
        kNonFatalFailure, // Failed but the test can continue.
        kFatalFailure,    // Failed and the test should be terminated.
        kSkip             // Skipped.
    };

    // C'tor. TestPartResult does NOT have a default constructor.
    // Always use this constructor (with parameters) to create a
    // TestPartResult object.
    TestPartResult(Type a_type, const char *a_file_name, int a_line_number, const char *a_message)
        : type_(a_type), file_name_(a_file_name == nullptr ? "" : a_file_name), line_number_(a_line_number),
          summary_(ExtractSummary(a_message)), message_(a_message)
    {
    }

    // Gets the outcome of the test part.
    Type type() const
    {
        return type_;
    }

    // Gets the name of the source file where the test part took place, or
    // NULL if it's unknown.
    const char *file_name() const
    {
        return file_name_.empty() ? nullptr : file_name_.c_str();
    }

    // Gets the line in the source file where the test part took place,
    // or -1 if it's unknown.
    int line_number() const
    {
        return line_number_;
    }

    // Gets the summary of the failure message.
    const char *summary() const
    {
        return summary_.c_str();
    }

    // Gets the message associated with the test part.
    const char *message() const
    {
        return message_.c_str();
    }

    // Returns true if and only if the test part was skipped.
    bool skipped() const
    {
        return type_ == kSkip;
    }

    // Returns true if and only if the test part passed.
    bool passed() const
    {
        return type_ == kSuccess;
    }

    // Returns true if and only if the test part non-fatally failed.
    bool nonfatally_failed() const
    {
        return type_ == kNonFatalFailure;
    }

    // Returns true if and only if the test part fatally failed.
    bool fatally_failed() const
    {
        return type_ == kFatalFailure;
    }

    // Returns true if and only if the test part failed.
    bool failed() const
    {
        return fatally_failed() || nonfatally_failed();
    }

  private:
    Type type_;

    // Gets the summary of the failure message by omitting the stack
    // trace in it.
    static std::string ExtractSummary(const char *message);

    // The name of the source file where the test part took place, or
    // "" if the source file is unknown.
    std::string file_name_;
    // The line in the source file where the test part took place, or -1
    // if the line number is unknown.
    int line_number_;
    std::string summary_; // The test failure summary.
    std::string message_; // The test failure message.
};

// Prints a TestPartResult object.
std::ostream &operator<<(std::ostream &os, const TestPartResult &result);
// An array of TestPartResult objects.
//
// Don't inherit from TestPartResultArray as its destructor is not
// virtual.
class GTEST_API_ TestPartResultArray
{
  public:
    TestPartResultArray()
    {
    }

    // Appends the given TestPartResult to the array.
    void Append(const TestPartResult &result);

    // Returns the TestPartResult at the given index (0-based).
    const TestPartResult &GetTestPartResult(int index) const;

    // Returns the number of TestPartResult objects in the array.
    int size() const;

  private:
    std::vector<TestPartResult> array_;

    GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
};
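A small sketch of walking the array with the accessors above (hypothetical helper; assumes <iostream>). file_name() may return NULL, so it is guarded:

#include <iostream>

void DumpFailures(const ::testing::TestPartResultArray &results)
{
    for (int i = 0; i < results.size(); ++i)
    {
        const ::testing::TestPartResult &r = results.GetTestPartResult(i);
        if (r.failed())
            std::cout << (r.file_name() ? r.file_name() : "<unknown>") << ":" << r.line_number() << ": "
                      << r.summary() << "\n";
    }
}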
// This interface knows how to report a test part result.
class GTEST_API_ TestPartResultReporterInterface
{
  public:
    virtual ~TestPartResultReporterInterface()
    {
    }

    virtual void ReportTestPartResult(const TestPartResult &result) = 0;
};

namespace internal
{

// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
// statement generates new fatal failures. To do so it registers itself as the
@@ -161,24 +196,28 @@ namespace internal {
// reported, it only delegates the reporting to the former result reporter.
// The original result reporter is restored in the destructor.
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
class GTEST_API_ HasNewFatalFailureHelper : public TestPartResultReporterInterface
{
  public:
    HasNewFatalFailureHelper();
    ~HasNewFatalFailureHelper() override;
    void ReportTestPartResult(const TestPartResult &result) override;
    bool has_new_fatal_failure() const
    {
        return has_new_fatal_failure_;
    }

  private:
    bool has_new_fatal_failure_;
    TestPartResultReporterInterface *original_reporter_;

    GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
};

} // namespace internal
} // namespace testing

GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251

#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
@@ -27,7 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// GOOGLETEST_CM0001 DO NOT DELETE

#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
@@ -101,7 +100,7 @@ TYPED_TEST(FooTest, HasPropertyA) { ... }
// };
// TYPED_TEST_SUITE(FooTest, MyTypes, MyTypeNames);

#endif // 0

// Type-parameterized tests are abstract test patterns parameterized
// by a type. Compared with typed tests, type-parameterized tests
@@ -168,7 +167,7 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// generate custom names.
// INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes, MyTypeNames);

#endif // 0

#include "gtest/internal/gtest-port.h"
#include "gtest/internal/gtest-type-util.h"
@@ -185,50 +184,38 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// Expands to the name of the typedef for the NameGenerator, responsible for
// creating the suffixes of the name.
#define GTEST_NAME_GENERATOR_(TestSuiteName) gtest_type_params_##TestSuiteName##_NameGenerator

#define TYPED_TEST_SUITE(CaseName, Types, ...) \
    typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_(CaseName); \
    typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type GTEST_NAME_GENERATOR_(CaseName)

#define TYPED_TEST(CaseName, TestName) \
    template <typename gtest_TypeParam_> \
    class GTEST_TEST_CLASS_NAME_(CaseName, TestName) : public CaseName<gtest_TypeParam_> \
    { \
      private: \
        typedef CaseName<gtest_TypeParam_> TestFixture; \
        typedef gtest_TypeParam_ TypeParam; \
        virtual void TestBody(); \
    }; \
    static bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
        ::testing::internal::TypeParameterizedTest< \
            CaseName, ::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
            GTEST_TYPE_PARAMS_(CaseName)>:: \
            Register( \
                "", ::testing::internal::CodeLocation(__FILE__, __LINE__), #CaseName, #TestName, 0, \
                ::testing::internal::GenerateNames<GTEST_NAME_GENERATOR_(CaseName), GTEST_TYPE_PARAMS_(CaseName)>()); \
    template <typename gtest_TypeParam_> void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()

// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_CASE \
    static_assert(::testing::internal::TypedTestCaseIsDeprecated(), ""); \
    TYPED_TEST_SUITE
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_

#endif // GTEST_HAS_TYPED_TEST
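For reference, a minimal typed-test sketch built on the two macros above (the fixture and type list are hypothetical; assumes <vector>):

template <typename T> class QueueTest : public ::testing::Test
{
  protected:
    std::vector<T> q_; // stand-in for the container under test
};

typedef ::testing::Types<int, double> MyTypes;
TYPED_TEST_SUITE(QueueTest, MyTypes);

TYPED_TEST(QueueTest, StartsEmpty)
{
    // TypeParam and TestFixture are the typedefs injected by the macro.
    EXPECT_TRUE(this->q_.empty());
}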
// Implements type-parameterized tests.
@@ -245,86 +232,73 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
//
// Expands to the name of the variable used to remember the names of
// the defined tests in the given test suite.
#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) gtest_typed_test_suite_p_state_##TestSuiteName##_

// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
//
// Expands to the name of the variable used to remember the names of
// the registered tests in the given test suite.
#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) gtest_registered_test_names_##TestSuiteName##_

// The variables defined in the type-parameterized test macros are
// static as typically these macros are used in a .h file that can be
// #included in multiple translation units linked together.
#define TYPED_TEST_SUITE_P(SuiteName) \
    static ::testing::internal::TypedTestSuitePState GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)

// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_CASE_P \
    static_assert(::testing::internal::TypedTestCase_P_IsDeprecated(), ""); \
    TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_

#define TYPED_TEST_P(SuiteName, TestName) \
    namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
    { \
        template <typename gtest_TypeParam_> class TestName : public SuiteName<gtest_TypeParam_> \
        { \
          private: \
            typedef SuiteName<gtest_TypeParam_> TestFixture; \
            typedef gtest_TypeParam_ TypeParam; \
            virtual void TestBody(); \
        }; \
        static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
            GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName(__FILE__, __LINE__, #SuiteName, #TestName); \
    } \
    template <typename gtest_TypeParam_> void GTEST_SUITE_NAMESPACE_(SuiteName)::TestName<gtest_TypeParam_>::TestBody()

#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \
    namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
    { \
        typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
    } \
    static const char *const GTEST_REGISTERED_TEST_NAMES_(SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
        GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames(__FILE__, __LINE__, #__VA_ARGS__)

// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define REGISTER_TYPED_TEST_CASE_P \
    static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), ""); \
    REGISTER_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_

#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \
    static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \
        ::testing::internal::TypeParameterizedTestSuite<SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
                                                        ::testing::internal::TypeList<Types>::type>:: \
            Register(#Prefix, ::testing::internal::CodeLocation(__FILE__, __LINE__), \
                     &GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
                     ::testing::internal::GenerateNames<::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type, \
                                                        ::testing::internal::TypeList<Types>::type>())

// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_CASE_P \
    static_assert(::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
    INSTANTIATE_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_

#endif // GTEST_HAS_TYPED_TEST_P

#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
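The full type-parameterized flow, as a hypothetical sketch tying the four macros together (declare, define, register, instantiate; assumes <string> and <vector>):

template <typename T> class ContainerTest : public ::testing::Test
{
};

TYPED_TEST_SUITE_P(ContainerTest);

TYPED_TEST_P(ContainerTest, DefaultConstructs)
{
    TypeParam value = TypeParam(); // every instantiating type must be default-constructible
    (void)value;
}

REGISTER_TYPED_TEST_SUITE_P(ContainerTest, DefaultConstructs);

typedef ::testing::Types<std::string, std::vector<int>> ContainerTypes;
INSTANTIATE_TYPED_TEST_SUITE_P(My, ContainerTest, ContainerTypes);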
File diff suppressed because it is too large
@@ -38,7 +38,8 @@
#include "gtest/gtest.h"

namespace testing
{

// This header implements a family of generic predicate assertion
// macros:
@@ -72,288 +73,177 @@ namespace testing {
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (const ::testing::AssertionResult gtest_ar = (expression)) \
        ; \
    else \
        on_failure(gtest_ar.failure_message())
// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
// this in your code.
template <typename Pred, typename T1>
AssertionResult AssertPred1Helper(const char *pred_text, const char *e1, Pred pred, const T1 &v1)
{
    if (pred(v1))
        return AssertionSuccess();

    return AssertionFailure() << pred_text << "(" << e1 << ") evaluates to false, where"
                              << "\n"
                              << e1 << " evaluates to " << ::testing::PrintToString(v1);
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
// Don't use this in your code.
#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure) GTEST_ASSERT_(pred_format(#v1, v1), on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
// this in your code.
#define GTEST_PRED1_(pred, v1, on_failure) GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, #v1, pred, v1), on_failure)

// Unary predicate assertion macros.
#define EXPECT_PRED_FORMAT1(pred_format, v1) GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT1(pred_format, v1) GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
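A hypothetical example of the unary forms; on failure, EXPECT_PRED1 prints both the predicate call and the evaluated argument, as built by AssertPred1Helper above:

static bool IsEven(int n)
{
    return n % 2 == 0;
}

TEST(PredSketch, Unary)
{
    EXPECT_PRED1(IsEven, 4); // passes
    // EXPECT_PRED1(IsEven, 5) would fail with roughly:
    //   IsEven(5) evaluates to false, where
    //   5 evaluates to 5
}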
// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
// this in your code.
template <typename Pred, typename T1, typename T2>
AssertionResult AssertPred2Helper(const char *pred_text, const char *e1, const char *e2, Pred pred, const T1 &v1,
                                  const T2 &v2)
{
    if (pred(v1, v2))
        return AssertionSuccess();

    return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ") evaluates to false, where"
                              << "\n"
                              << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
                              << e2 << " evaluates to " << ::testing::PrintToString(v2);
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
// Don't use this in your code.
#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure) GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
// this in your code.
#define GTEST_PRED2_(pred, v1, v2, on_failure) \
    GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, #v1, #v2, pred, v1, v2), on_failure)

// Binary predicate assertion macros.
#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED2(pred, v1, v2) GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED2(pred, v1, v2) GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
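The *_PRED_FORMAT2 variants take a formatter rather than a bare predicate, which allows a custom failure message. A hypothetical sketch (std::gcd from <numeric> is a C++17 assumption of this example):

#include <numeric>

static ::testing::AssertionResult MutuallyPrime(const char *m_expr, const char *n_expr, int m, int n)
{
    if (std::gcd(m, n) == 1)
        return ::testing::AssertionSuccess();
    return ::testing::AssertionFailure() << m_expr << " and " << n_expr << " (" << m << " and " << n
                                         << ") are not mutually prime";
}

TEST(PredSketch, Binary)
{
    EXPECT_PRED_FORMAT2(MutuallyPrime, 3, 5);
}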
// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
// this in your code.
template <typename Pred, typename T1, typename T2, typename T3>
AssertionResult AssertPred3Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, Pred pred,
                                  const T1 &v1, const T2 &v2, const T3 &v3)
{
    if (pred(v1, v2, v3))
        return AssertionSuccess();

    return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ") evaluates to false, where"
                              << "\n"
                              << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
                              << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
                              << e3 << " evaluates to " << ::testing::PrintToString(v3);
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
// Don't use this in your code.
#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure) \
    GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
// this in your code.
#define GTEST_PRED3_(pred, v1, v2, v3, on_failure) \
    GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, #v1, #v2, #v3, pred, v1, v2, v3), on_failure)

// Ternary predicate assertion macros.
#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
    GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED3(pred, v1, v2, v3) GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED3(pred, v1, v2, v3) GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
// this in your code.
template <typename Pred, typename T1, typename T2, typename T3, typename T4>
AssertionResult AssertPred4Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, const char *e4,
                                  Pred pred, const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4)
{
    if (pred(v1, v2, v3, v4))
        return AssertionSuccess();

    return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
                              << ") evaluates to false, where"
                              << "\n"
                              << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
                              << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
                              << e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
                              << e4 << " evaluates to " << ::testing::PrintToString(v4);
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
// Don't use this in your code.
#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure) \
    GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
// this in your code.
#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure) \
    GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, #v1, #v2, #v3, #v4, pred, v1, v2, v3, v4), on_failure)

// 4-ary predicate assertion macros.
#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
    GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED4(pred, v1, v2, v3, v4) GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
    GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED4(pred, v1, v2, v3, v4) GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
// this in your code.
template <typename Pred, typename T1, typename T2, typename T3, typename T4, typename T5>
AssertionResult AssertPred5Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, const char *e4,
                                  const char *e5, Pred pred, const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4,
                                  const T5 &v5)
{
    if (pred(v1, v2, v3, v4, v5))
        return AssertionSuccess();

    return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4 << ", " << e5
                              << ") evaluates to false, where"
                              << "\n"
                              << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
                              << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
                              << e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
                              << e4 << " evaluates to " << ::testing::PrintToString(v4) << "\n"
                              << e5 << " evaluates to " << ::testing::PrintToString(v5);
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
// Don't use this in your code.
#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \
    GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
// this in your code.
#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \
    GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, pred, v1, v2, v3, v4, v5), on_failure)

// 5-ary predicate assertion macros.
#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
    GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
    GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
} // namespace testing

#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
@@ -55,7 +55,6 @@
// Note: The test class must be in the same namespace as the class being tested.
// For example, putting MyClassTest in an anonymous namespace will not work.
#define FRIEND_TEST(test_case_name, test_name) friend class test_case_name##_##test_name##_Test

#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
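A hypothetical sketch of the macro: the class grants one named test access to its privates, and, per the note above, the test must live in the same namespace as the class:

class Counter
{
  public:
    void Increment()
    {
        ++count_;
    }

  private:
    FRIEND_TEST(CounterTest, StartsAtZero); // befriends CounterTest_StartsAtZero_Test
    int count_ = 0;
};

TEST(CounterTest, StartsAtZero)
{
    Counter c;
    EXPECT_EQ(0, c.count_); // legal only because of FRIEND_TEST
}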
@@ -34,4 +34,4 @@
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_

#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
@@ -39,4 +39,4 @@
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_

#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
@@ -34,4 +34,4 @@
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_

#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_
@@ -39,11 +39,13 @@
#include "gtest/gtest-matchers.h"
#include "gtest/internal/gtest-internal.h"

#include <memory>
#include <stdio.h>

namespace testing
{
namespace internal
{

GTEST_DECLARE_string_(internal_run_death_test);
@@ -54,8 +56,7 @@ const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
#if GTEST_HAS_DEATH_TEST

GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)

// DeathTest is a class that hides much of the complexity of the
// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
@@ -70,92 +71,110 @@ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
// by wait(2)
// exit code: The integer code passed to exit(3), _exit(2), or
// returned from main()
class GTEST_API_ DeathTest
{
  public:
    // Create returns false if there was an error determining the
    // appropriate action to take for the current death test; for example,
    // if the gtest_death_test_style flag is set to an invalid value.
    // The LastMessage method will return a more detailed message in that
    // case. Otherwise, the DeathTest pointer pointed to by the "test"
    // argument is set. If the death test should be skipped, the pointer
    // is set to NULL; otherwise, it is set to the address of a new concrete
    // DeathTest object that controls the execution of the current test.
    static bool Create(const char *statement, Matcher<const std::string &> matcher, const char *file, int line,
                       DeathTest **test);
    DeathTest();
    virtual ~DeathTest()
    {
    }

    // A helper class that aborts a death test when it's deleted.
    class ReturnSentinel
    {
      public:
        explicit ReturnSentinel(DeathTest *test) : test_(test)
        {
        }
        ~ReturnSentinel()
        {
            test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT);
        }

      private:
        DeathTest *const test_;
        GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
    } GTEST_ATTRIBUTE_UNUSED_;

    // An enumeration of possible roles that may be taken when a death
    // test is encountered. EXECUTE means that the death test logic should
    // be executed immediately. OVERSEE means that the program should prepare
    // the appropriate environment for a child process to execute the death
    // test, then wait for it to complete.
    enum TestRole
    {
        OVERSEE_TEST,
        EXECUTE_TEST
    };

    // An enumeration of the three reasons that a test might be aborted.
    enum AbortReason
    {
        TEST_ENCOUNTERED_RETURN_STATEMENT,
        TEST_THREW_EXCEPTION,
        TEST_DID_NOT_DIE
    };

    // Assumes one of the above roles.
    virtual TestRole AssumeRole() = 0;

    // Waits for the death test to finish and returns its status.
    virtual int Wait() = 0;

    // Returns true if the death test passed; that is, the test process
    // exited during the test, its exit status matches a user-supplied
    // predicate, and its stderr output matches a user-supplied regular
    // expression.
    // The user-supplied predicate may be a macro expression rather
    // than a function pointer or functor, or else Wait and Passed could
    // be combined.
    virtual bool Passed(bool exit_status_ok) = 0;

    // Signals that the death test did not die as expected.
    virtual void Abort(AbortReason reason) = 0;

    // Returns a human-readable outcome message regarding the outcome of
    // the last death test.
    static const char *LastMessage();

    static void set_last_death_test_message(const std::string &message);

  private:
    // A string containing a description of the outcome of the last death test.
    static std::string last_death_test_message_;

    GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
};

GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// Factory interface for death tests. May be mocked out for testing. // Factory interface for death tests. May be mocked out for testing.
class DeathTestFactory { class DeathTestFactory
public: {
virtual ~DeathTestFactory() { } public:
virtual bool Create(const char* statement, virtual ~DeathTestFactory()
Matcher<const std::string&> matcher, const char* file, {
int line, DeathTest** test) = 0; }
virtual bool Create(const char *statement, Matcher<const std::string &> matcher, const char *file, int line,
DeathTest **test) = 0;
}; };
// A concrete DeathTestFactory implementation for normal use. // A concrete DeathTestFactory implementation for normal use.
class DefaultDeathTestFactory : public DeathTestFactory { class DefaultDeathTestFactory : public DeathTestFactory
public: {
bool Create(const char* statement, Matcher<const std::string&> matcher, public:
const char* file, int line, DeathTest** test) override; bool Create(const char *statement, Matcher<const std::string &> matcher, const char *file, int line,
DeathTest **test) override;
}; };
// Returns true if exit_status describes a process that was terminated

@@ -165,84 +184,91 @@ GTEST_API_ bool ExitedUnsuccessfully(int exit_status);

// A string passed to EXPECT_DEATH (etc.) is caught by one of these overloads
// and interpreted as a regex (rather than an Eq matcher) for legacy
// compatibility.
inline Matcher<const ::std::string &> MakeDeathTestMatcher(::testing::internal::RE regex)
{
    return ContainsRegex(regex.pattern());
}
inline Matcher<const ::std::string &> MakeDeathTestMatcher(const char *regex)
{
    return ContainsRegex(regex);
}
inline Matcher<const ::std::string &> MakeDeathTestMatcher(const ::std::string &regex)
{
    return ContainsRegex(regex);
}

// If a Matcher<const ::std::string&> is passed to EXPECT_DEATH (etc.), it's
// used directly.
inline Matcher<const ::std::string &> MakeDeathTestMatcher(Matcher<const ::std::string &> matcher)
{
    return matcher;
}
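A quick usage sketch of what these overloads buy the public macros (an illustration, not part of the header; it assumes a gtest build with death tests enabled, and the test names and crashing helper are invented): a plain string literal is routed through the ContainsRegex path above, while a ready-made matcher such as HasSubstr is forwarded unchanged.

#include <cstdio>
#include <cstdlib>

#include "gtest/gtest.h"

namespace
{
// Invented helper: dies with a known message on stderr.
void CrashWithMessage()
{
    std::fprintf(stderr, "invariant violated\n");
    std::abort();
}

TEST(DeathMatcherDemo, StringIsTreatedAsRegex)
{
    // Resolves to the ContainsRegex overload above.
    EXPECT_DEATH(CrashWithMessage(), "invariant [a-z]+");
}

TEST(DeathMatcherDemo, MatcherIsUsedDirectly)
{
    // Converts to Matcher<const std::string&> and is passed through as-is.
    EXPECT_DEATH(CrashWithMessage(), ::testing::HasSubstr("violated"));
}
} // namespace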
// Traps C++ exceptions escaping statement and reports them as test
// failures. Note that trapping SEH exceptions is not implemented here.
#if GTEST_HAS_EXCEPTIONS
#define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
    try \
    { \
        GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
    catch (const ::std::exception &gtest_exception) \
    { \
        fprintf(stderr, \
                "\n%s: Caught std::exception-derived exception escaping the " \
                "death test statement. Exception message: %s\n", \
                ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), gtest_exception.what()); \
        fflush(stderr); \
        death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
    } \
    catch (...) \
    { \
        death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
    }

#else
#define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
#endif

// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
// ASSERT_EXIT*, and EXPECT_EXIT*.
#define GTEST_DEATH_TEST_(statement, predicate, regex_or_matcher, fail) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (::testing::internal::AlwaysTrue()) \
    { \
        ::testing::internal::DeathTest *gtest_dt; \
        if (!::testing::internal::DeathTest::Create(#statement, \
                                                    ::testing::internal::MakeDeathTestMatcher(regex_or_matcher), \
                                                    __FILE__, __LINE__, &gtest_dt)) \
        { \
            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
        } \
        if (gtest_dt != nullptr) \
        { \
            std::unique_ptr<::testing::internal::DeathTest> gtest_dt_ptr(gtest_dt); \
            switch (gtest_dt->AssumeRole()) \
            { \
            case ::testing::internal::DeathTest::OVERSEE_TEST: \
                if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) \
                { \
                    goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
                } \
                break; \
            case ::testing::internal::DeathTest::EXECUTE_TEST: { \
                ::testing::internal::DeathTest::ReturnSentinel gtest_sentinel(gtest_dt); \
                GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
                gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
                break; \
            } \
            default: \
                break; \
            } \
        } \
    } \
    else \
        GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__) : fail(::testing::internal::DeathTest::LastMessage())
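For orientation, here is how the wrappers built on GTEST_DEATH_TEST_ read at a call site (a sketch under the assumption that GTEST_HAS_DEATH_TEST is 1; the helper and messages are invented). The parent process takes the OVERSEE_TEST branch and feeds Wait()'s result to the predicate, while the forked child takes EXECUTE_TEST and actually runs the statement.

#include <cstdio>
#include <cstdlib>

#include "gtest/gtest.h"

namespace
{
// Invented helper: exits with a known status after printing to stderr.
void ExitCleanly()
{
    std::fprintf(stderr, "shutting down\n");
    std::exit(2);
}

TEST(DeathMacroDemo, PredicateAndStderrAreBothChecked)
{
    // ExitedWithCode(2) is the predicate applied to the status from Wait();
    // "shutting down" becomes the matcher run against the child's stderr.
    EXPECT_EXIT(ExitCleanly(), ::testing::ExitedWithCode(2), "shutting down");
}
} // namespace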
// The symbol "fail" here expands to something into which a message // The symbol "fail" here expands to something into which a message
// can be streamed. // can be streamed.
@ -251,54 +277,70 @@ inline Matcher<const ::std::string&> MakeDeathTestMatcher(
// must accept a streamed message even though the message is never printed. // must accept a streamed message even though the message is never printed.
// The regex object is not evaluated, but it is used to prevent "unused" // The regex object is not evaluated, but it is used to prevent "unused"
// warnings and to avoid an expression that doesn't compile in debug mode. // warnings and to avoid an expression that doesn't compile in debug mode.
#define GTEST_EXECUTE_STATEMENT_(statement, regex_or_matcher) \ #define GTEST_EXECUTE_STATEMENT_(statement, regex_or_matcher) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (::testing::internal::AlwaysTrue()) { \ if (::testing::internal::AlwaysTrue()) \
GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ { \
} else if (!::testing::internal::AlwaysTrue()) { \ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
::testing::internal::MakeDeathTestMatcher(regex_or_matcher); \ } \
} else \ else if (!::testing::internal::AlwaysTrue()) \
::testing::Message() { \
::testing::internal::MakeDeathTestMatcher(regex_or_matcher); \
} \
else \
::testing::Message()
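The streamed-message requirement described above is what keeps EXPECT_DEATH_IF_SUPPORTED compiling on platforms without death-test support, where it bottoms out in this macro family. A sketch, reusing the includes from the previous examples (the message text is invented):

TEST(DeathFallbackDemo, StreamedMessageStillCompiles)
{
    // Without death-test support this expands via GTEST_EXECUTE_STATEMENT_:
    // the statement is guarded away, the matcher argument is merely "used",
    // and ::testing::Message() accepts the streamed text.
    EXPECT_DEATH_IF_SUPPORTED(std::abort(), "") << "context if it fails";
}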
// A class representing the parsed contents of the
// --gtest_internal_run_death_test flag, as it existed when
// RUN_ALL_TESTS was called.
class InternalRunDeathTestFlag
{
  public:
    InternalRunDeathTestFlag(const std::string &a_file, int a_line, int an_index, int a_write_fd)
        : file_(a_file), line_(a_line), index_(an_index), write_fd_(a_write_fd)
    {
    }

    ~InternalRunDeathTestFlag()
    {
        if (write_fd_ >= 0)
            posix::Close(write_fd_);
    }

    const std::string &file() const
    {
        return file_;
    }
    int line() const
    {
        return line_;
    }
    int index() const
    {
        return index_;
    }
    int write_fd() const
    {
        return write_fd_;
    }

  private:
    std::string file_;
    int line_;
    int index_;
    int write_fd_;

    GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
};

// Returns a newly created InternalRunDeathTestFlag object with fields
// initialized from the GTEST_FLAG(internal_run_death_test) flag if
// the flag is specified; otherwise returns NULL.
InternalRunDeathTestFlag *ParseInternalRunDeathTestFlag();
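For context, a sketch of how these fields travel (an assumption about gtest's fork-based implementation, not something this header specifies): the parent re-executes the test binary and passes file, line, index, and write fd as one '|'-separated flag value.

// Hypothetical child invocation re-running death test #1 declared at
// foo_test.cc:42, reporting its outcome over inherited descriptor 5:
//   ./foo_test --gtest_internal_run_death_test=foo_test.cc|42|1|5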
#endif // GTEST_HAS_DEATH_TEST

} // namespace internal
} // namespace testing

#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
View File
@@ -42,11 +42,12 @@
#include "gtest/internal/gtest-string.h" #include "gtest/internal/gtest-string.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
/* class A needs to have dll-interface to be used by clients of class B */)
namespace testing { namespace testing
namespace internal { {
namespace internal
{
// FilePath - a class for file and directory pathname manipulation which
// handles platform-specific conventions (like the pathname separator).

@@ -59,153 +60,165 @@ namespace internal {
// Names are NOT checked for syntax correctness -- no checking for illegal
// characters, malformed paths, etc.
class GTEST_API_ FilePath
{
  public:
    FilePath() : pathname_("")
    {
    }
    FilePath(const FilePath &rhs) : pathname_(rhs.pathname_)
    {
    }

    explicit FilePath(const std::string &pathname) : pathname_(pathname)
    {
        Normalize();
    }

    FilePath &operator=(const FilePath &rhs)
    {
        Set(rhs);
        return *this;
    }

    void Set(const FilePath &rhs)
    {
        pathname_ = rhs.pathname_;
    }

    const std::string &string() const
    {
        return pathname_;
    }
    const char *c_str() const
    {
        return pathname_.c_str();
    }

    // Returns the current working directory, or "" if unsuccessful.
    static FilePath GetCurrentDir();

    // Given directory = "dir", base_name = "test", number = 0,
    // extension = "xml", returns "dir/test.xml". If number is greater
    // than zero (e.g., 12), returns "dir/test_12.xml".
    // On Windows platform, uses \ as the separator rather than /.
    static FilePath MakeFileName(const FilePath &directory, const FilePath &base_name, int number,
                                 const char *extension);

    // Given directory = "dir", relative_path = "test.xml",
    // returns "dir/test.xml".
    // On Windows, uses \ as the separator rather than /.
    static FilePath ConcatPaths(const FilePath &directory, const FilePath &relative_path);

    // Returns a pathname for a file that does not currently exist. The pathname
    // will be directory/base_name.extension or
    // directory/base_name_<number>.extension if directory/base_name.extension
    // already exists. The number will be incremented until a pathname is found
    // that does not already exist.
    // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
    // There could be a race condition if two or more processes are calling this
    // function at the same time -- they could both pick the same filename.
    static FilePath GenerateUniqueFileName(const FilePath &directory, const FilePath &base_name, const char *extension);

    // Returns true if and only if the path is "".
    bool IsEmpty() const
    {
        return pathname_.empty();
    }

    // If input name has a trailing separator character, removes it and returns
    // the name, otherwise return the name string unmodified.
    // On Windows platform, uses \ as the separator, other platforms use /.
    FilePath RemoveTrailingPathSeparator() const;

    // Returns a copy of the FilePath with the directory part removed.
    // Example: FilePath("path/to/file").RemoveDirectoryName() returns
    // FilePath("file"). If there is no directory part ("just_a_file"), it returns
    // the FilePath unmodified. If there is no file part ("just_a_dir/") it
    // returns an empty FilePath ("").
    // On Windows platform, '\' is the path separator, otherwise it is '/'.
    FilePath RemoveDirectoryName() const;

    // RemoveFileName returns the directory path with the filename removed.
    // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
    // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
    // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
    // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
    // On Windows platform, '\' is the path separator, otherwise it is '/'.
    FilePath RemoveFileName() const;

    // Returns a copy of the FilePath with the case-insensitive extension removed.
    // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
    // FilePath("dir/file"). If a case-insensitive extension is not
    // found, returns a copy of the original FilePath.
    FilePath RemoveExtension(const char *extension) const;

    // Creates directories so that path exists. Returns true if successful or if
    // the directories already exist; returns false if unable to create
    // directories for any reason. Will also return false if the FilePath does
    // not represent a directory (that is, it doesn't end with a path separator).
    bool CreateDirectoriesRecursively() const;

    // Create the directory so that path exists. Returns true if successful or
    // if the directory already exists; returns false if unable to create the
    // directory for any reason, including if the parent directory does not
    // exist. Not named "CreateDirectory" because that's a macro on Windows.
    bool CreateFolder() const;

    // Returns true if FilePath describes something in the file-system,
    // either a file, directory, or whatever, and that something exists.
    bool FileOrDirectoryExists() const;

    // Returns true if pathname describes a directory in the file-system
    // that exists.
    bool DirectoryExists() const;

    // Returns true if FilePath ends with a path separator, which indicates that
    // it is intended to represent a directory. Returns false otherwise.
    // This does NOT check that a directory (or file) actually exists.
    bool IsDirectory() const;

    // Returns true if pathname describes a root directory. (Windows has one
    // root directory per disk drive.)
    bool IsRootDirectory() const;

    // Returns true if pathname describes an absolute path.
    bool IsAbsolutePath() const;

  private:
    // Replaces multiple consecutive separators with a single separator.
    // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
    // redundancies that might be in a pathname involving "." or "..".
    //
    // A pathname with multiple consecutive separators may occur either through
    // user error or as a result of some scripts or APIs that generate a pathname
    // with a trailing separator. On other platforms the same API or script
    // may NOT generate a pathname with a trailing "/". Then elsewhere that
    // pathname may have another "/" and pathname components added to it,
    // without checking for the separator already being there.
    // The script language and operating system may allow paths like "foo//bar"
    // but some of the functions in FilePath will not handle that correctly. In
    // particular, RemoveTrailingPathSeparator() only removes one separator, and
    // it is called in CreateDirectoriesRecursively() assuming that it will change
    // a pathname from directory syntax (trailing separator) to filename syntax.
    //
    // On Windows this method also replaces the alternate path separator '/' with
    // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
    // "bar\\foo".
    void Normalize();
    // Returns a pointer to the last occurrence of a valid path separator in
    // the FilePath. On Windows, for example, both '/' and '\' are valid path
    // separators. Returns NULL if no path separator was found.
    const char *FindLastPathSeparator() const;

    std::string pathname_;
}; // class FilePath
} // namespace internal
} // namespace testing

GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251

#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
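A short sketch restating the behaviors documented in the comments above (FilePath lives in testing::internal, so this is illustration rather than public API; POSIX separators assumed, and the function name is invented):

#include "gtest/internal/gtest-filepath.h"

using ::testing::internal::FilePath;

void FilePathDemo()
{
    // The explicit constructor runs Normalize(): "bar///foo" -> "bar/foo".
    FilePath normalized("bar///foo");

    // Each result comment restates an example from the header's own docs.
    FilePath xml = FilePath::MakeFileName(FilePath("dir"), FilePath("test"), 12, "xml"); // "dir/test_12.xml"
    FilePath base = FilePath("path/to/file").RemoveDirectoryName();                      // "file"
    FilePath dir = FilePath("path/to/file").RemoveFileName();                            // "path/to/"
    FilePath noext = FilePath("dir/file.exe").RemoveExtension("EXE");                    // "dir/file"

    (void)normalized;
    (void)xml;
    (void)base;
    (void)dir;
    (void)noext;
}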
View File
@@ -37,71 +37,71 @@
// Determines the platform on which Google Test is compiled.
#ifdef __CYGWIN__
#define GTEST_OS_CYGWIN 1
#elif defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)
#define GTEST_OS_WINDOWS_MINGW 1
#define GTEST_OS_WINDOWS 1
#elif defined _WIN32
#define GTEST_OS_WINDOWS 1
#ifdef _WIN32_WCE
#define GTEST_OS_WINDOWS_MOBILE 1
#elif defined(WINAPI_FAMILY)
#include <winapifamily.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
#define GTEST_OS_WINDOWS_DESKTOP 1
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)
#define GTEST_OS_WINDOWS_PHONE 1
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
#define GTEST_OS_WINDOWS_RT 1
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_TV_TITLE)
#define GTEST_OS_WINDOWS_PHONE 1
#define GTEST_OS_WINDOWS_TV_TITLE 1
#else
// WINAPI_FAMILY defined but no known partition matched.
// Default to desktop.
#define GTEST_OS_WINDOWS_DESKTOP 1
#endif
#else
#define GTEST_OS_WINDOWS_DESKTOP 1
#endif // _WIN32_WCE
#elif defined __OS2__
#define GTEST_OS_OS2 1
#elif defined __APPLE__
#define GTEST_OS_MAC 1
#if TARGET_OS_IPHONE
#define GTEST_OS_IOS 1
#endif
#elif defined __DragonFly__
#define GTEST_OS_DRAGONFLY 1
#elif defined __FreeBSD__
#define GTEST_OS_FREEBSD 1
#elif defined __Fuchsia__
#define GTEST_OS_FUCHSIA 1
#elif defined(__GLIBC__) && defined(__FreeBSD_kernel__)
#define GTEST_OS_GNU_KFREEBSD 1
#elif defined __linux__
#define GTEST_OS_LINUX 1
#if defined __ANDROID__
#define GTEST_OS_LINUX_ANDROID 1
#endif
#elif defined __MVS__
#define GTEST_OS_ZOS 1
#elif defined(__sun) && defined(__SVR4)
#define GTEST_OS_SOLARIS 1
#elif defined(_AIX)
#define GTEST_OS_AIX 1
#elif defined(__hpux)
#define GTEST_OS_HPUX 1
#elif defined __native_client__
#define GTEST_OS_NACL 1
#elif defined __NetBSD__
#define GTEST_OS_NETBSD 1
#elif defined __OpenBSD__
#define GTEST_OS_OPENBSD 1
#elif defined __QNX__
#define GTEST_OS_QNX 1
#elif defined(__HAIKU__)
#define GTEST_OS_HAIKU 1
#endif // __CYGWIN__

#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
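A minimal sketch of consuming these macros from test code (the test name and skip message are invented, and it assumes a gtest version that provides GTEST_SKIP()). Each GTEST_OS_* macro is either defined to 1 or left undefined, so a plain #if works:

#include "gtest/gtest.h"

TEST(PortArchDemo, PosixOnlyBehavior)
{
#if GTEST_OS_WINDOWS
    GTEST_SKIP() << "covered by a Windows-specific test instead";
#else
    SUCCEED(); // POSIX code path exercised here
#endif
}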