SwiftyOpenCC // Clang-Format.

ShikiSuen 2022-04-11 16:58:42 +08:00
parent c03344cf23
commit 32ae6b1e1f
436 changed files with 77727 additions and 63883 deletions
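Summary of the change, for readers skimming the hunks below: the vendored benchmark and pybind sources move from the upstream Google style (attached braces, left-bound `&`/`*`) to Allman braces with right-aligned pointers and a wide column limit. The repository's actual `.clang-format` file is not part of this excerpt, so the snippet below is only a hedged sketch of settings that would produce formatting like the hunks that follow; every value in it is an inference from the diff, not the committed configuration.

# Hypothetical .clang-format sketch -- inferred from the reformatted code, not copied from the repository
BasedOnStyle: Microsoft     # assumed base; it already implies Allman braces and 4-space indents
BreakBeforeBraces: Allman   # every '{' moves to its own line, as seen throughout the diff
IndentWidth: 4              # assumption; the diff view strips leading whitespace
ColumnLimit: 120            # long signatures and initializer lists are kept on one or two lines
PointerAlignment: Right     # 'char *p' / 'State &state' replace 'char* p' / 'State& state'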

File diff suppressed because it is too large.


@@ -4,41 +4,44 @@
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace
{
namespace py = ::pybind11;
std::vector<std::string> Initialize(const std::vector<std::string> &argv)
{
    // The `argv` pointers here become invalid when this function returns, but
    // benchmark holds the pointer to `argv[0]`. We create a static copy of it
    // so it persists, and replace the pointer below.
    static std::string executable_name(argv[0]);
    std::vector<char *> ptrs;
    ptrs.reserve(argv.size());
    for (auto &arg : argv)
    {
        ptrs.push_back(const_cast<char *>(arg.c_str()));
    }
    ptrs[0] = const_cast<char *>(executable_name.c_str());
    int argc = static_cast<int>(argv.size());
    benchmark::Initialize(&argc, ptrs.data());
    std::vector<std::string> remaining_argv;
    remaining_argv.reserve(argc);
    for (int i = 0; i < argc; ++i)
    {
        remaining_argv.emplace_back(ptrs[i]);
    }
    return remaining_argv;
}
void RegisterBenchmark(const char *name, py::function f)
{
    benchmark::RegisterBenchmark(name, [f](benchmark::State &state) { f(&state); });
}
PYBIND11_MODULE(_benchmark, m)
{
    m.def("Initialize", Initialize);
    m.def("RegisterBenchmark", RegisterBenchmark);
    m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); });
    py::class_<benchmark::State>(m, "State")
        .def("__bool__", &benchmark::State::KeepRunning)


@@ -1,12 +1,13 @@
#include <gnuregex.h>
#include <string>
int main()
{
    std::string str = "test0159";
    regex_t re;
    int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
    if (ec != 0)
    {
        return ec;
    }
    return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}


@@ -1,14 +1,15 @@
#include <regex.h>
#include <string>
int main()
{
    std::string str = "test0159";
    regex_t re;
    int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
    if (ec != 0)
    {
        return ec;
    }
    int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
    regfree(&re);
    return ret;
}


@@ -1,10 +1,9 @@
#include <regex>
#include <string>
int main()
{
    const std::string str = "test0159";
    std::regex re;
    re = std::regex("^[a-z]+[0-9]+$", std::regex_constants::extended | std::regex_constants::nosubs);
    return std::regex_search(str, re) ? 0 : -1;
}


@@ -1,6 +1,7 @@
#include <chrono>
int main()
{
    typedef std::chrono::steady_clock Clock;
    Clock::time_point tp = Clock::now();
    ((void)tp);


@@ -1,4 +1,6 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"
int main()
{
}


@@ -1,13 +1,15 @@
#include "benchmark/benchmark.h"
void BM_StringCreation(benchmark::State &state)
{
    while (state.KeepRunning())
        std::string empty_string;
}
BENCHMARK(BM_StringCreation);
void BM_StringCopy(benchmark::State &state)
{
    std::string x = "hello";
    while (state.KeepRunning())
        std::string copy(x);


@@ -3,8 +3,10 @@
#include "internal_macros.h"
namespace benchmark
{
namespace internal
{
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
@@ -14,15 +16,13 @@ namespace internal {
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N> char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))


@@ -106,31 +106,25 @@ DEFINE_bool(benchmark_counters_tabular, false);
// The level of verbose logging to output
DEFINE_int32(v, 0);
namespace benchmark
{
namespace internal
{
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile *)
{
}
} // namespace internal
State::State(IterationCount max_iters, const std::vector<int64_t> &ranges, int thread_i, int n_threads,
             internal::ThreadTimer *timer, internal::ThreadManager *manager)
    : total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), started_(false), finished_(false),
      error_occurred_(false), range_(ranges), complexity_n_(0), counters(), thread_index(thread_i), threads(n_threads),
      timer_(timer), manager_(manager)
{
    CHECK(max_iterations != 0) << "At least one iteration must be run";
    CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
@@ -149,9 +143,7 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
#endif
    // Offset tests to ensure commonly accessed data is on the first cache line.
    const int cache_line_size = 64;
    static_assert(offsetof(State, error_occurred_) <= (cache_line_size - sizeof(error_occurred_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
@@ -159,51 +151,62 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
#endif
}
void State::PauseTiming()
{
    // Add in time accumulated so far
    CHECK(started_ && !finished_ && !error_occurred_);
    timer_->StopTimer();
}
void State::ResumeTiming()
{
    CHECK(started_ && !finished_ && !error_occurred_);
    timer_->StartTimer();
}
void State::SkipWithError(const char *msg)
{
    CHECK(msg);
    error_occurred_ = true;
    {
        MutexLock l(manager_->GetBenchmarkMutex());
        if (manager_->results.has_error_ == false)
        {
            manager_->results.error_message_ = msg;
            manager_->results.has_error_ = true;
        }
    }
    total_iterations_ = 0;
    if (timer_->running())
        timer_->StopTimer();
}
void State::SetIterationTime(double seconds)
{
    timer_->SetIterationTime(seconds);
}
void State::SetLabel(const char *label)
{
    MutexLock l(manager_->GetBenchmarkMutex());
    manager_->results.report_label_ = label;
}
void State::StartKeepRunning()
{
    CHECK(!started_ && !finished_);
    started_ = true;
    total_iterations_ = error_occurred_ ? 0 : max_iterations;
    manager_->StartStopBarrier();
    if (!error_occurred_)
        ResumeTiming();
}
void State::FinishKeepRunning()
{
    CHECK(started_ && (!finished_ || error_occurred_));
    if (!error_occurred_)
    {
        PauseTiming();
    }
    // Total iterations has now wrapped around past 0. Fix this.
@@ -212,12 +215,14 @@ void State::FinishKeepRunning() {
    manager_->StartStopBarrier();
}
namespace internal
{
namespace
{
void RunBenchmarks(const std::vector<BenchmarkInstance> &benchmarks, BenchmarkReporter *display_reporter,
                   BenchmarkReporter *file_reporter)
{
    // Note the file_reporter can be null.
    CHECK(display_reporter != nullptr);
@@ -225,15 +230,16 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
    bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
    size_t name_field_width = 10;
    size_t stat_field_width = 0;
    for (const BenchmarkInstance &benchmark : benchmarks)
    {
        name_field_width = std::max<size_t>(name_field_width, benchmark.name.str().size());
        might_have_aggregates |= benchmark.repetitions > 1;
        for (const auto &Stat : *benchmark.statistics)
            stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
    }
    if (might_have_aggregates)
        name_field_width += 1 + stat_field_width;
    // Print header here
    BenchmarkReporter::Context context;
@@ -244,22 +250,23 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
    // We flush streams after invoking reporter methods that write to them. This
    // ensures users get timely updates even when streams are not line-buffered.
    auto flushStreams = [](BenchmarkReporter *reporter) {
        if (!reporter)
            return;
        std::flush(reporter->GetOutputStream());
        std::flush(reporter->GetErrorStream());
    };
    if (display_reporter->ReportContext(context) && (!file_reporter || file_reporter->ReportContext(context)))
    {
        flushStreams(display_reporter);
        flushStreams(file_reporter);
        for (const auto &benchmark : benchmarks)
        {
            RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
            auto report = [&run_results](BenchmarkReporter *reporter, bool report_aggregates_only) {
                assert(reporter);
                // If there are no aggregates, do output non-aggregates.
                report_aggregates_only &= !run_results.aggregates_only.empty();
@@ -278,7 +285,8 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
        }
    }
    display_reporter->Finalize();
    if (file_reporter)
        file_reporter->Finalize();
    flushStreams(display_reporter);
    flushStreams(file_reporter);
}
@@ -290,16 +298,23 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::unique_ptr<BenchmarkReporter> CreateReporter(std::string const &name, ConsoleReporter::OutputOptions output_opts)
{
    typedef std::unique_ptr<BenchmarkReporter> PtrType;
    if (name == "console")
    {
        return PtrType(new ConsoleReporter(output_opts));
    }
    else if (name == "json")
    {
        return PtrType(new JSONReporter);
    }
    else if (name == "csv")
    {
        return PtrType(new CSVReporter);
    }
    else
    {
        std::cerr << "Unexpected format: '" << name << "'\n";
        std::exit(1);
    }
@@ -311,29 +326,39 @@ std::unique_ptr<BenchmarkReporter> CreateReporter(
} // end namespace
bool IsZero(double n)
{
    return std::abs(n) < std::numeric_limits<double>::epsilon();
}
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color)
{
    int output_opts = ConsoleReporter::OO_Defaults;
    auto is_benchmark_color = [force_no_color]() -> bool {
        if (force_no_color)
        {
            return false;
        }
        if (FLAGS_benchmark_color == "auto")
        {
            return IsColorTerminal();
        }
        return IsTruthyFlagValue(FLAGS_benchmark_color);
    };
    if (is_benchmark_color())
    {
        output_opts |= ConsoleReporter::OO_Color;
    }
    else
    {
        output_opts &= ~ConsoleReporter::OO_Color;
    }
    if (FLAGS_benchmark_counters_tabular)
    {
        output_opts |= ConsoleReporter::OO_Tabular;
    }
    else
    {
        output_opts &= ~ConsoleReporter::OO_Tabular;
    }
    return static_cast<ConsoleReporter::OutputOptions>(output_opts);
@@ -341,16 +366,18 @@ ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
} // end namespace internal
size_t RunSpecifiedBenchmarks()
{
    return RunSpecifiedBenchmarks(nullptr, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter)
{
    return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter, BenchmarkReporter *file_reporter)
{
    std::string spec = FLAGS_benchmark_filter;
    if (spec.empty() || spec == "all")
        spec = "."; // Regexp that matches all benchmarks
@@ -359,30 +386,33 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
    std::ofstream output_file;
    std::unique_ptr<BenchmarkReporter> default_display_reporter;
    std::unique_ptr<BenchmarkReporter> default_file_reporter;
    if (!display_reporter)
    {
        default_display_reporter = internal::CreateReporter(FLAGS_benchmark_format, internal::GetOutputOptions());
        display_reporter = default_display_reporter.get();
    }
    auto &Out = display_reporter->GetOutputStream();
    auto &Err = display_reporter->GetErrorStream();
    std::string const &fname = FLAGS_benchmark_out;
    if (fname.empty() && file_reporter)
    {
        Err << "A custom file reporter was provided but "
               "--benchmark_out=<file> was not specified."
            << std::endl;
        std::exit(1);
    }
    if (!fname.empty())
    {
        output_file.open(fname);
        if (!output_file.is_open())
        {
            Err << "invalid file name: '" << fname << std::endl;
            std::exit(1);
        }
        if (!file_reporter)
        {
            default_file_reporter = internal::CreateReporter(FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
            file_reporter = default_file_reporter.get();
        }
        file_reporter->SetOutputStream(&output_file);
@@ -390,32 +420,39 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
    }
    std::vector<internal::BenchmarkInstance> benchmarks;
    if (!FindBenchmarksInternal(spec, &benchmarks, &Err))
        return 0;
    if (benchmarks.empty())
    {
        Err << "Failed to match any benchmarks against regex: " << spec << "\n";
        return 0;
    }
    if (FLAGS_benchmark_list_tests)
    {
        for (auto const &benchmark : benchmarks)
            Out << benchmark.name.str() << "\n";
    }
    else
    {
        internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
    }
    return benchmarks.size();
}
void RegisterMemoryManager(MemoryManager *manager)
{
    internal::memory_manager = manager;
}
namespace internal
{
void PrintUsageAndExit()
{
    fprintf(stdout, "benchmark"
                    " [--benchmark_list_tests={true|false}]\n"
                    " [--benchmark_filter=<regex>]\n"
                    " [--benchmark_min_time=<min_time>]\n"
@@ -431,67 +468,69 @@ void PrintUsageAndExit() {
    exit(0);
}
void ParseCommandLineFlags(int *argc, char **argv)
{
    using namespace benchmark;
    BenchmarkReporter::Context::executable_name = (argc && *argc > 0) ? argv[0] : "unknown";
    for (int i = 1; argc && i < *argc; ++i)
    {
        if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) ||
            ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
            ParseDoubleFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) ||
            ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) ||
            ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", &FLAGS_benchmark_report_aggregates_only) ||
            ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", &FLAGS_benchmark_display_aggregates_only) ||
            ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
            ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
            ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) ||
            ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
            // "color_print" is the deprecated name for "benchmark_color".
            // TODO: Remove this.
            ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
            ParseBoolFlag(argv[i], "benchmark_counters_tabular", &FLAGS_benchmark_counters_tabular) ||
            ParseInt32Flag(argv[i], "v", &FLAGS_v))
        {
            for (int j = i; j != *argc - 1; ++j)
                argv[j] = argv[j + 1];
            --(*argc);
            --i;
        }
        else if (IsFlag(argv[i], "help"))
        {
            PrintUsageAndExit();
        }
    }
    for (auto const *flag : {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
        if (*flag != "console" && *flag != "json" && *flag != "csv")
        {
            PrintUsageAndExit();
        }
    if (FLAGS_benchmark_color.empty())
    {
        PrintUsageAndExit();
    }
}
int InitializeStreams()
{
    static std::ios_base::Init init;
    return 0;
}
} // end namespace internal
void Initialize(int *argc, char **argv)
{
    internal::ParseCommandLineFlags(argc, argv);
    internal::LogLevel() = FLAGS_v;
}
bool ReportUnrecognizedArguments(int argc, char **argv)
{
    for (int i = 1; i < argc; ++i)
    {
        fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
    }
    return argc > 1;
}


@@ -1,15 +1,17 @@
#include "benchmark_api_internal.h"
namespace benchmark
{
namespace internal
{
State BenchmarkInstance::Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
                             internal::ThreadManager *manager) const
{
    State st(iters, arg, thread_id, threads, timer, manager);
    benchmark->Run(st);
    return st;
}
} // namespace internal
} // namespace benchmark


@@ -11,13 +11,16 @@
#include <string>
#include <vector>
namespace benchmark
{
namespace internal
{
// Information kept per benchmark we may want to run
struct BenchmarkInstance
{
    BenchmarkName name;
    Benchmark *benchmark;
    AggregationReportMode aggregation_report_mode;
    std::vector<int64_t> arg;
    TimeUnit time_unit;
@@ -26,22 +29,20 @@ struct BenchmarkInstance {
    bool use_real_time;
    bool use_manual_time;
    BigO complexity;
    BigOFunc *complexity_lambda;
    UserCounters counters;
    const std::vector<Statistics> *statistics;
    bool last_benchmark_instance;
    int repetitions;
    double min_time;
    IterationCount iterations;
    int threads; // Number of concurrent threads to us
    State Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
              internal::ThreadManager *manager) const;
};
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
bool IsZero(double n);


@@ -14,26 +14,34 @@
#include <benchmark/benchmark.h>
namespace benchmark
{
namespace
{
// Compute the total size of a pack of std::strings
size_t size_impl()
{
    return 0;
}
template <typename Head, typename... Tail> size_t size_impl(const Head &head, const Tail &...tail)
{
    return head.size() + size_impl(tail...);
}
// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string &, char)
{
}
template <typename Head, typename... Tail>
void join_impl(std::string &s, const char delimiter, const Head &head, const Tail &...tail)
{
    if (!s.empty() && !head.empty())
    {
        s += delimiter;
    }
@@ -42,8 +50,8 @@ void join_impl(std::string& s, const char delimiter, const Head& head,
    join_impl(s, delimiter, tail...);
}
template <typename... Ts> std::string join(char delimiter, const Ts &...ts)
{
    std::string s;
    s.reserve(sizeof...(Ts) + size_impl(ts...));
    join_impl(s, delimiter, ts...);
@@ -51,8 +59,8 @@ std::string join(char delimiter, const Ts&... ts) {
}
} // namespace
std::string BenchmarkName::str() const
{
    return join('/', function_name, args, min_time, iterations, repetitions, time_type, threads);
}
} // namespace benchmark


@@ -52,9 +52,11 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark
{
namespace
{
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8;
// The size of a benchmark family determines is the number of inputs to repeat
@@ -62,7 +64,8 @@ static const int kRangeMultiplier = 8;
static const size_t kMaxFamilySize = 100;
} // end namespace
namespace internal
{
//=============================================================================//
// BenchmarkFamilies
@@ -70,9 +73,10 @@ namespace internal {
// Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies
{
  public:
    static BenchmarkFamilies *GetInstance();
    // Registers a benchmark family and returns the index assigned to it.
    size_t AddBenchmark(std::unique_ptr<Benchmark> family);
@@ -82,49 +86,54 @@ class BenchmarkFamilies {
    // Extract the list of benchmark instances that match the specified
    // regular expression.
    bool FindBenchmarks(std::string re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
  private:
    BenchmarkFamilies()
    {
    }
    std::vector<std::unique_ptr<Benchmark>> families_;
    Mutex mutex_;
};
BenchmarkFamilies *BenchmarkFamilies::GetInstance()
{
    static BenchmarkFamilies instance;
    return &instance;
}
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family)
{
    MutexLock l(mutex_);
    size_t index = families_.size();
    families_.push_back(std::move(family));
    return index;
}
void BenchmarkFamilies::ClearBenchmarks()
{
    MutexLock l(mutex_);
    families_.clear();
    families_.shrink_to_fit();
}
bool BenchmarkFamilies::FindBenchmarks(std::string spec, std::vector<BenchmarkInstance> *benchmarks,
                                       std::ostream *ErrStream)
{
    CHECK(ErrStream);
    auto &Err = *ErrStream;
    // Make regular expression out of command-line flag
    std::string error_msg;
    Regex re;
    bool isNegativeFilter = false;
    if (spec[0] == '-')
    {
        spec.replace(0, 1, "");
        isNegativeFilter = true;
    }
    if (!re.Init(spec, &error_msg))
    {
        Err << "Could not compile benchmark re: " << error_msg << std::endl;
        return false;
    }
@@ -133,30 +142,36 @@ bool BenchmarkFamilies::FindBenchmarks(
    const std::vector<int> one_thread = {1};
    MutexLock l(mutex_);
    for (std::unique_ptr<Benchmark> &family : families_)
    {
        // Family was deleted or benchmark doesn't match
        if (!family)
            continue;
        if (family->ArgsCnt() == -1)
        {
            family->Args({});
        }
        const std::vector<int> *thread_counts =
            (family->thread_counts_.empty() ? &one_thread
                                            : &static_cast<const std::vector<int> &>(family->thread_counts_));
        const size_t family_size = family->args_.size() * thread_counts->size();
        // The benchmark will be run at least 'family_size' different inputs.
        // If 'family_size' is very large warn the user.
        if (family_size > kMaxFamilySize)
        {
            Err << "The number of inputs is very large. " << family->name_ << " will be repeated at least "
                << family_size << " times.\n";
        }
        // reserve in the special case the regex ".", since we know the final
        // family size.
        if (spec == ".")
            benchmarks->reserve(family_size);
        for (auto const &args : family->args_)
        {
            for (int num_threads : *thread_counts)
            {
                BenchmarkInstance instance;
                instance.name.function_name = family->name_;
                instance.benchmark = family.get();
@@ -177,14 +192,18 @@ bool BenchmarkFamilies::FindBenchmarks(
                // Add arguments to instance name
                size_t arg_i = 0;
                for (auto const &arg : args)
                {
                    if (!instance.name.args.empty())
                    {
                        instance.name.args += '/';
                    }
                    if (arg_i < family->arg_names_.size())
                    {
                        const auto &arg_name = family->arg_names_[arg_i];
                        if (!arg_name.empty())
                        {
                            instance.name.args += StrFormat("%s:", arg_name.c_str());
                        }
                    }
@@ -194,41 +213,46 @@ bool BenchmarkFamilies::FindBenchmarks(
                }
                if (!IsZero(family->min_time_))
                    instance.name.min_time = StrFormat("min_time:%0.3f", family->min_time_);
                if (family->iterations_ != 0)
                {
                    instance.name.iterations =
                        StrFormat("iterations:%lu", static_cast<unsigned long>(family->iterations_));
                }
                if (family->repetitions_ != 0)
                    instance.name.repetitions = StrFormat("repeats:%d", family->repetitions_);
                if (family->measure_process_cpu_time_)
                {
                    instance.name.time_type = "process_time";
                }
                if (family->use_manual_time_)
                {
                    if (!instance.name.time_type.empty())
                    {
                        instance.name.time_type += '/';
                    }
                    instance.name.time_type += "manual_time";
                }
                else if (family->use_real_time_)
                {
                    if (!instance.name.time_type.empty())
                    {
                        instance.name.time_type += '/';
                    }
                    instance.name.time_type += "real_time";
                }
                // Add the number of threads used to the name
                if (!family->thread_counts_.empty())
                {
                    instance.name.threads = StrFormat("threads:%d", instance.threads);
                }
                const auto full_name = instance.name.str();
                if ((re.Match(full_name) && !isNegativeFilter) || (!re.Match(full_name) && isNegativeFilter))
                {
                    instance.last_benchmark_instance = (&args == &family->args_.back());
                    benchmarks->push_back(std::move(instance));
                }
@@ -238,18 +262,18 @@ bool BenchmarkFamilies::FindBenchmarks(
    return true;
}
Benchmark *RegisterBenchmarkInternal(Benchmark *bench)
{
    std::unique_ptr<Benchmark> bench_ptr(bench);
    BenchmarkFamilies *families = BenchmarkFamilies::GetInstance();
    families->AddBenchmark(std::move(bench_ptr));
    return bench;
}
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err)
{
    return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
@@ -257,73 +281,76 @@ bool FindBenchmarksInternal(const std::string& re,
// Benchmark
//=============================================================================//
Benchmark::Benchmark(const char *name)
    : name_(name), aggregation_report_mode_(ARM_Unspecified), time_unit_(kNanosecond),
      range_multiplier_(kRangeMultiplier), min_time_(0), iterations_(0), repetitions_(0),
      measure_process_cpu_time_(false), use_real_time_(false), use_manual_time_(false), complexity_(oNone),
      complexity_lambda_(nullptr)
{
    ComputeStatistics("mean", StatisticsMean);
    ComputeStatistics("median", StatisticsMedian);
    ComputeStatistics("stddev", StatisticsStdDev);
}
Benchmark::~Benchmark()
{
}
Benchmark *Benchmark::Arg(int64_t x)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
    args_.push_back({x});
    return this;
}
Benchmark *Benchmark::Unit(TimeUnit unit)
{
    time_unit_ = unit;
    return this;
}
Benchmark *Benchmark::Range(int64_t start, int64_t limit)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
    std::vector<int64_t> arglist;
    AddRange(&arglist, start, limit, range_multiplier_);
    for (int64_t i : arglist)
    {
        args_.push_back({i});
    }
    return this;
}
Benchmark *Benchmark::Ranges(const std::vector<std::pair<int64_t, int64_t>> &ranges)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
    std::vector<std::vector<int64_t>> arglists(ranges.size());
    std::size_t total = 1;
    for (std::size_t i = 0; i < ranges.size(); i++)
    {
        AddRange(&arglists[i], ranges[i].first, ranges[i].second, range_multiplier_);
        total *= arglists[i].size();
    }
    std::vector<std::size_t> ctr(arglists.size(), 0);
    for (std::size_t i = 0; i < total; i++)
    {
        std::vector<int64_t> tmp;
        tmp.reserve(arglists.size());
        for (std::size_t j = 0; j < arglists.size(); j++)
        {
            tmp.push_back(arglists[j].at(ctr[j]));
        }
        args_.push_back(std::move(tmp));
        for (std::size_t j = 0; j < arglists.size(); j++)
        {
            if (ctr[j] + 1 < arglists[j].size())
            {
                ++ctr[j];
                break;
            }
@@ -333,129 +360,148 @@ Benchmark* Benchmark::Ranges(
    return this;
}
Benchmark *Benchmark::ArgName(const std::string &name)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
    arg_names_ = {name};
    return this;
}
Benchmark *Benchmark::ArgNames(const std::vector<std::string> &names)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
    arg_names_ = names;
    return this;
}
Benchmark *Benchmark::DenseRange(int64_t start, int64_t limit, int step)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
    CHECK_LE(start, limit);
    for (int64_t arg = start; arg <= limit; arg += step)
    {
        args_.push_back({arg});
    }
    return this;
}
Benchmark *Benchmark::Args(const std::vector<int64_t> &args)
{
    CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
    args_.push_back(args);
    return this;
}
Benchmark *Benchmark::Apply(void (*custom_arguments)(Benchmark *benchmark))
{
    custom_arguments(this);
    return this;
}
Benchmark *Benchmark::RangeMultiplier(int multiplier)
{
    CHECK(multiplier > 1);
    range_multiplier_ = multiplier;
    return this;
}
Benchmark *Benchmark::MinTime(double t)
{
    CHECK(t > 0.0);
    CHECK(iterations_ == 0);
    min_time_ = t;
    return this;
}
Benchmark *Benchmark::Iterations(IterationCount n)
{
    CHECK(n > 0);
    CHECK(IsZero(min_time_));
    iterations_ = n;
    return this;
}
Benchmark *Benchmark::Repetitions(int n)
{
    CHECK(n > 0);
    repetitions_ = n;
    return this;
}
Benchmark *Benchmark::ReportAggregatesOnly(bool value)
{
    aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
    return this;
}
Benchmark *Benchmark::DisplayAggregatesOnly(bool value)
{
    // If we were called, the report mode is no longer 'unspecified', in any case.
    aggregation_report_mode_ = static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_Default);
    if (value)
    {
        aggregation_report_mode_ =
            static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
    }
    else
    {
        aggregation_report_mode_ =
            static_cast<AggregationReportMode>(aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
    }
    return this;
}
Benchmark *Benchmark::MeasureProcessCPUTime()
{
    // Can be used together with UseRealTime() / UseManualTime().
    measure_process_cpu_time_ = true;
    return this;
}
Benchmark *Benchmark::UseRealTime()
{
    CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
    use_real_time_ = true;
    return this;
}
Benchmark *Benchmark::UseManualTime()
{
    CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
    use_manual_time_ = true;
    return this;
}
Benchmark *Benchmark::Complexity(BigO complexity)
{
    complexity_ = complexity;
    return this;
}
Benchmark *Benchmark::Complexity(BigOFunc *complexity)
{
    complexity_lambda_ = complexity;
    complexity_ = oLambda;
    return this;
}
Benchmark *Benchmark::ComputeStatistics(std::string name, StatisticsFunc *statistics)
{
    statistics_.emplace_back(name, statistics);
    return this;
}
Benchmark *Benchmark::Threads(int t)
{
    CHECK_GT(t, 0);
    thread_counts_.push_back(t);
    return this;
}
Benchmark *Benchmark::ThreadRange(int min_threads, int max_threads)
{
    CHECK_GT(min_threads, 0);
    CHECK_GE(max_threads, min_threads);
@@ -463,29 +509,37 @@ Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
    return this;
}
Benchmark *Benchmark::DenseThreadRange(int min_threads, int max_threads, int stride)
{
    CHECK_GT(min_threads, 0);
    CHECK_GE(max_threads, min_threads);
    CHECK_GE(stride, 1);
    for (auto i = min_threads; i < max_threads; i += stride)
    {
        thread_counts_.push_back(i);
    }
    thread_counts_.push_back(max_threads);
    return this;
}
Benchmark *Benchmark::ThreadPerCpu()
{
    thread_counts_.push_back(CPUInfo::Get().num_cpus);
    return this;
}
void Benchmark::SetName(const char *name)
{
    name_ = name;
}
int Benchmark::ArgsCnt() const
{
    if (args_.empty())
    {
        if (arg_names_.empty())
            return -1;
        return static_cast<int>(arg_names_.size());
    }
    return static_cast<int>(args_.front().size());
@@ -495,11 +549,15 @@ int Benchmark::ArgsCnt() const {
// FunctionBenchmark
//=============================================================================//
void FunctionBenchmark::Run(State &st)
{
    func_(st);
}
} // end namespace internal
void ClearRegisteredBenchmarks()
{
    internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}


@ -5,14 +5,15 @@
#include "check.h" #include "check.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
// Append the powers of 'mult' in the closed interval [lo, hi]. // Append the powers of 'mult' in the closed interval [lo, hi].
// Returns iterator to the start of the inserted range. // Returns iterator to the start of the inserted range.
template <typename T> template <typename T> typename std::vector<T>::iterator AddPowers(std::vector<T> *dst, T lo, T hi, int mult)
typename std::vector<T>::iterator {
AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
CHECK_GE(lo, 0); CHECK_GE(lo, 0);
CHECK_GE(hi, lo); CHECK_GE(hi, lo);
CHECK_GE(mult, 2); CHECK_GE(mult, 2);
@ -22,20 +23,23 @@ AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
static const T kmax = std::numeric_limits<T>::max(); static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult" // Space out the values in multiples of "mult"
for (T i = 1; i <= hi; i *= mult) { for (T i = 1; i <= hi; i *= mult)
if (i >= lo) { {
if (i >= lo)
{
dst->push_back(i); dst->push_back(i);
} }
// Break the loop here since multiplying by // Break the loop here since multiplying by
// 'mult' would move outside of the range of T // 'mult' would move outside of the range of T
if (i > kmax / mult) break; if (i > kmax / mult)
break;
} }
return dst->begin() + start_offset; return dst->begin() + start_offset;
} }
template <typename T> template <typename T> void AddNegatedPowers(std::vector<T> *dst, T lo, T hi, int mult)
void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) { {
// We negate lo and hi so we require that they cannot be equal to 'min'. // We negate lo and hi so we require that they cannot be equal to 'min'.
CHECK_GT(lo, std::numeric_limits<T>::min()); CHECK_GT(lo, std::numeric_limits<T>::min());
CHECK_GT(hi, std::numeric_limits<T>::min()); CHECK_GT(hi, std::numeric_limits<T>::min());
@ -50,14 +54,13 @@ void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
const auto it = AddPowers(dst, hi_complement, lo_complement, mult); const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
std::for_each(it, dst->end(), [](T& t) { t *= -1; }); std::for_each(it, dst->end(), [](T &t) { t *= -1; });
std::reverse(it, dst->end()); std::reverse(it, dst->end());
} }
template <typename T> template <typename T> void AddRange(std::vector<T> *dst, T lo, T hi, int mult)
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) { {
static_assert(std::is_integral<T>::value && std::is_signed<T>::value, static_assert(std::is_integral<T>::value && std::is_signed<T>::value, "Args type must be a signed integer");
"Args type must be a signed integer");
CHECK_GE(hi, lo); CHECK_GE(hi, lo);
CHECK_GE(mult, 2); CHECK_GE(mult, 2);
@ -68,10 +71,12 @@ void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
// Handle lo == hi as a special case, so we then know // Handle lo == hi as a special case, so we then know
// lo < hi and so it is safe to add 1 to lo and subtract 1 // lo < hi and so it is safe to add 1 to lo and subtract 1
// from hi without falling outside of the range of T. // from hi without falling outside of the range of T.
if (lo == hi) return; if (lo == hi)
return;
// Ensure that lo_inner <= hi_inner below. // Ensure that lo_inner <= hi_inner below.
if (lo + 1 == hi) { if (lo + 1 == hi)
{
dst->push_back(hi); dst->push_back(hi);
return; return;
} }
@ -81,22 +86,26 @@ void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
const auto hi_inner = static_cast<T>(hi - 1); const auto hi_inner = static_cast<T>(hi - 1);
// Insert negative values // Insert negative values
if (lo_inner < 0) { if (lo_inner < 0)
{
AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult); AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
} }
// Treat 0 as a special case (see discussion on #762). // Treat 0 as a special case (see discussion on #762).
if (lo <= 0 && hi >= 0) { if (lo <= 0 && hi >= 0)
{
dst->push_back(0); dst->push_back(0);
} }
// Insert positive values // Insert positive values
if (hi_inner > 0) { if (hi_inner > 0)
{
AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult); AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
} }
// Add "hi" (if different from last value). // Add "hi" (if different from last value).
if (hi != dst->back()) { if (hi != dst->back())
{
dst->push_back(hi); dst->push_back(hi);
} }
} }
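
AddRange is the helper behind the public Range()/RangeMultiplier() registration calls; a minimal sketch of its user-visible effect, assuming the upstream public API (BM_Copy is a hypothetical benchmark):

#include <benchmark/benchmark.h>
#include <vector>

static void BM_Copy(benchmark::State& state) {
  std::vector<int> src(static_cast<size_t>(state.range(0)), 1);
  for (auto _ : state) {
    std::vector<int> dst(src);  // work proportional to range(0)
    benchmark::DoNotOptimize(dst.data());
  }
}
// With multiplier 2, Range(8, 64) expands through AddRange/AddPowers into
// the arguments 8, 16, 32, 64: lo, the interior powers of 2, then hi.
BENCHMARK(BM_Copy)->RangeMultiplier(2)->Range(8, 64);

BENCHMARK_MAIN();
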
@ -51,22 +51,24 @@
#include "thread_manager.h" #include "thread_manager.h"
#include "thread_timer.h" #include "thread_timer.h"
namespace benchmark { namespace benchmark
{
namespace internal { namespace internal
{
MemoryManager* memory_manager = nullptr; MemoryManager *memory_manager = nullptr;
namespace { namespace
{
static constexpr IterationCount kMaxIterations = 1000000000; static constexpr IterationCount kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport( BenchmarkReporter::Run CreateRunReport(const benchmark::internal::BenchmarkInstance &b,
const benchmark::internal::BenchmarkInstance& b, const internal::ThreadManager::Result &results, IterationCount memory_iterations,
const internal::ThreadManager::Result& results, const MemoryManager::Result &memory_result, double seconds,
IterationCount memory_iterations, int64_t repetition_index)
const MemoryManager::Result& memory_result, double seconds, {
int64_t repetition_index) {
// Create report about this benchmark run. // Create report about this benchmark run.
BenchmarkReporter::Run report; BenchmarkReporter::Run report;
@ -81,10 +83,14 @@ BenchmarkReporter::Run CreateRunReport(
report.repetition_index = repetition_index; report.repetition_index = repetition_index;
report.repetitions = b.repetitions; report.repetitions = b.repetitions;
if (!report.error_occurred) { if (!report.error_occurred)
if (b.use_manual_time) { {
if (b.use_manual_time)
{
report.real_accumulated_time = results.manual_time_used; report.real_accumulated_time = results.manual_time_used;
} else { }
else
{
report.real_accumulated_time = results.real_time_used; report.real_accumulated_time = results.real_time_used;
} }
report.cpu_accumulated_time = results.cpu_time_used; report.cpu_accumulated_time = results.cpu_time_used;
@ -94,12 +100,11 @@ BenchmarkReporter::Run CreateRunReport(
report.statistics = b.statistics; report.statistics = b.statistics;
report.counters = results.counters; report.counters = results.counters;
if (memory_iterations > 0) { if (memory_iterations > 0)
{
report.has_memory_result = true; report.has_memory_result = true;
report.allocs_per_iter = report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) / memory_iterations ? static_cast<double>(memory_result.num_allocs) / memory_iterations : 0;
memory_iterations
: 0;
report.max_bytes_used = memory_result.max_bytes_used; report.max_bytes_used = memory_result.max_bytes_used;
} }
@ -110,18 +115,16 @@ BenchmarkReporter::Run CreateRunReport(
// Execute one thread of benchmark b for the specified number of iterations. // Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results. // Adds the stats collected for the thread into manager->results.
void RunInThread(const BenchmarkInstance* b, IterationCount iters, void RunInThread(const BenchmarkInstance *b, IterationCount iters, int thread_id, ThreadManager *manager)
int thread_id, ThreadManager* manager) { {
internal::ThreadTimer timer( internal::ThreadTimer timer(b->measure_process_cpu_time ? internal::ThreadTimer::CreateProcessCpuTime()
b->measure_process_cpu_time
? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create()); : internal::ThreadTimer::Create());
State st = b->Run(iters, thread_id, &timer, manager); State st = b->Run(iters, thread_id, &timer, manager);
CHECK(st.error_occurred() || st.iterations() >= st.max_iterations) CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!"; << "Benchmark returned before State::KeepRunning() returned false!";
{ {
MutexLock l(manager->GetBenchmarkMutex()); MutexLock l(manager->GetBenchmarkMutex());
internal::ThreadManager::Result& results = manager->results; internal::ThreadManager::Result &results = manager->results;
results.iterations += st.iterations(); results.iterations += st.iterations();
results.cpu_time_used += timer.cpu_time_used(); results.cpu_time_used += timer.cpu_time_used();
results.real_time_used += timer.real_time_used(); results.real_time_used += timer.real_time_used();
@ -132,32 +135,30 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
manager->NotifyThreadComplete(); manager->NotifyThreadComplete();
} }
class BenchmarkRunner { class BenchmarkRunner
{
public: public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_, BenchmarkRunner(const benchmark::internal::BenchmarkInstance &b_,
std::vector<BenchmarkReporter::Run>* complexity_reports_) std::vector<BenchmarkReporter::Run> *complexity_reports_)
: b(b_), : b(b_), complexity_reports(*complexity_reports_),
complexity_reports(*complexity_reports_),
min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time), min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
repeats(b.repetitions != 0 ? b.repetitions repeats(b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions),
: FLAGS_benchmark_repetitions), has_explicit_iteration_count(b.iterations != 0), pool(b.threads - 1),
has_explicit_iteration_count(b.iterations != 0), iters(has_explicit_iteration_count ? b.iterations : 1)
pool(b.threads - 1), {
iters(has_explicit_iteration_count ? b.iterations : 1) {
run_results.display_report_aggregates_only = run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only || (FLAGS_benchmark_report_aggregates_only || FLAGS_benchmark_display_aggregates_only);
FLAGS_benchmark_display_aggregates_only); run_results.file_report_aggregates_only = FLAGS_benchmark_report_aggregates_only;
run_results.file_report_aggregates_only = if (b.aggregation_report_mode != internal::ARM_Unspecified)
FLAGS_benchmark_report_aggregates_only; {
if (b.aggregation_report_mode != internal::ARM_Unspecified) {
run_results.display_report_aggregates_only = run_results.display_report_aggregates_only =
(b.aggregation_report_mode & (b.aggregation_report_mode & internal::ARM_DisplayReportAggregatesOnly);
internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only = run_results.file_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly); (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
} }
for (int repetition_num = 0; repetition_num < repeats; repetition_num++) { for (int repetition_num = 0; repetition_num < repeats; repetition_num++)
{
DoOneRepetition(repetition_num); DoOneRepetition(repetition_num);
} }
@ -165,22 +166,25 @@ class BenchmarkRunner {
run_results.aggregates_only = ComputeStats(run_results.non_aggregates); run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
// Maybe calculate complexity report // Maybe calculate complexity report
if ((b.complexity != oNone) && b.last_benchmark_instance) { if ((b.complexity != oNone) && b.last_benchmark_instance)
{
auto additional_run_stats = ComputeBigO(complexity_reports); auto additional_run_stats = ComputeBigO(complexity_reports);
run_results.aggregates_only.insert(run_results.aggregates_only.end(), run_results.aggregates_only.insert(run_results.aggregates_only.end(), additional_run_stats.begin(),
additional_run_stats.begin(),
additional_run_stats.end()); additional_run_stats.end());
complexity_reports.clear(); complexity_reports.clear();
} }
} }
RunResults&& get_results() { return std::move(run_results); } RunResults &&get_results()
{
return std::move(run_results);
}
private: private:
RunResults run_results; RunResults run_results;
const benchmark::internal::BenchmarkInstance& b; const benchmark::internal::BenchmarkInstance &b;
std::vector<BenchmarkReporter::Run>& complexity_reports; std::vector<BenchmarkReporter::Run> &complexity_reports;
const double min_time; const double min_time;
const int repeats; const int repeats;
@ -192,21 +196,23 @@ class BenchmarkRunner {
// So only the first repetition has to find/calculate it, // So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count. // the other repetitions will just use that precomputed iteration count.
struct IterationResults { struct IterationResults
{
internal::ThreadManager::Result results; internal::ThreadManager::Result results;
IterationCount iters; IterationCount iters;
double seconds; double seconds;
}; };
IterationResults DoNIterations() { IterationResults DoNIterations()
{
VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n"; VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
std::unique_ptr<internal::ThreadManager> manager; std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(b.threads)); manager.reset(new internal::ThreadManager(b.threads));
// Run all but one thread in separate threads // Run all but one thread in separate threads
for (std::size_t ti = 0; ti < pool.size(); ++ti) { for (std::size_t ti = 0; ti < pool.size(); ++ti)
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1), {
manager.get()); pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1), manager.get());
} }
// And run one thread here directly. // And run one thread here directly.
// (If we were asked to run just one thread, we don't create new threads.) // (If we were asked to run just one thread, we don't create new threads.)
@ -215,7 +221,8 @@ class BenchmarkRunner {
// The main thread has finished. Now let's wait for the other threads. // The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads(); manager->WaitForAllThreads();
for (std::thread& thread : pool) thread.join(); for (std::thread &thread : pool)
thread.join();
IterationResults i; IterationResults i;
// Acquire the measurements/counters from the manager, UNDER THE LOCK! // Acquire the measurements/counters from the manager, UNDER THE LOCK!
@ -231,25 +238,29 @@ class BenchmarkRunner {
i.results.real_time_used /= b.threads; i.results.real_time_used /= b.threads;
i.results.manual_time_used /= b.threads; i.results.manual_time_used /= b.threads;
// If we were measuring whole-process CPU usage, adjust the CPU time too. // If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads; if (b.measure_process_cpu_time)
i.results.cpu_time_used /= b.threads;
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" << i.results.real_time_used << "\n";
<< i.results.real_time_used << "\n";
// So for how long were we running? // So for how long were we running?
i.iters = iters; i.iters = iters;
// Base decisions off of real time if requested by this benchmark. // Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used; i.seconds = i.results.cpu_time_used;
if (b.use_manual_time) { if (b.use_manual_time)
{
i.seconds = i.results.manual_time_used; i.seconds = i.results.manual_time_used;
} else if (b.use_real_time) { }
else if (b.use_real_time)
{
i.seconds = i.results.real_time_used; i.seconds = i.results.real_time_used;
} }
return i; return i;
} }
IterationCount PredictNumItersNeeded(const IterationResults& i) const { IterationCount PredictNumItersNeeded(const IterationResults &i) const
{
// See by how much the iteration count should be increased. // See by how much the iteration count should be increased.
// Note: Avoid division by zero with max(seconds, 1ns). // Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9); double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
@ -260,12 +271,12 @@ class BenchmarkRunner {
// expansion should be 14x. // expansion should be 14x.
bool is_significant = (i.seconds / min_time) > 0.1; bool is_significant = (i.seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier); multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0) multiplier = 2.0; if (multiplier <= 1.0)
multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up. // So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>( const IterationCount max_next_iters = static_cast<IterationCount>(
std::lround(std::max(multiplier * static_cast<double>(i.iters), std::lround(std::max(multiplier * static_cast<double>(i.iters), static_cast<double>(i.iters) + 1.0)));
static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits, though. // But we do have *some* sanity limits, though.
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations); const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
@ -273,12 +284,12 @@ class BenchmarkRunner {
return next_iters; // round up before conversion to integer. return next_iters; // round up before conversion to integer.
} }
bool ShouldReportIterationResults(const IterationResults& i) const { bool ShouldReportIterationResults(const IterationResults &i) const
{
// Determine if this run should be reported; // Determine if this run should be reported;
// either because it has run for a sufficient amount of time // either because it has run for a sufficient amount of time
// or because an error was reported. // or because an error was reported.
return i.results.has_error_ || return i.results.has_error_ || i.iters >= kMaxIterations || // Too many iterations already.
i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough. i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds // CPU time is specified but the elapsed real time greatly exceeds
// the minimum time. // the minimum time.
@ -286,7 +297,8 @@ class BenchmarkRunner {
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time); ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
} }
void DoOneRepetition(int64_t repetition_index) { void DoOneRepetition(int64_t repetition_index)
{
const bool is_the_first_repetition = repetition_index == 0; const bool is_the_first_repetition = repetition_index == 0;
IterationResults i; IterationResults i;
@ -296,7 +308,8 @@ class BenchmarkRunner {
// Please do note that if there are repetitions, the iteration count // Please do note that if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and other repetitions // is *only* calculated for the *first* repetition, and other repetitions
// simply use that precomputed iteration count. // simply use that precomputed iteration count.
for (;;) { for (;;)
{
i = DoNIterations(); i = DoNIterations();
// Do we consider the results to be significant? // Do we consider the results to be significant?
@ -304,25 +317,25 @@ class BenchmarkRunner {
// it has calculated the correct iteration time, so we have run that very // it has calculated the correct iteration time, so we have run that very
// iteration count just now. No need to calculate anything. Just report. // iteration count just now. No need to calculate anything. Just report.
// Else, the normal rules apply. // Else, the normal rules apply.
const bool results_are_significant = !is_the_first_repetition || const bool results_are_significant =
has_explicit_iteration_count || !is_the_first_repetition || has_explicit_iteration_count || ShouldReportIterationResults(i);
ShouldReportIterationResults(i);
if (results_are_significant) break; // Good, let's report them! if (results_are_significant)
break; // Good, let's report them!
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
// iteration count, and run the benchmark again... // iteration count, and run the benchmark again...
iters = PredictNumItersNeeded(i); iters = PredictNumItersNeeded(i);
assert(iters > i.iters && assert(iters > i.iters && "if we did more iterations than we want to do the next time, "
"if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run."); "then we should have accepted the current iteration run.");
} }
// Oh, one last thing, we need to also produce the 'memory measurements'.. // Oh, one last thing, we need to also produce the 'memory measurements'..
MemoryManager::Result memory_result; MemoryManager::Result memory_result;
IterationCount memory_iterations = 0; IterationCount memory_iterations = 0;
if (memory_manager != nullptr) { if (memory_manager != nullptr)
{
// Only run a few iterations to reduce the impact of one-time // Only run a few iterations to reduce the impact of one-time
// allocations in benchmarks that are not properly managed. // allocations in benchmarks that are not properly managed.
memory_iterations = std::min<IterationCount>(16, iters); memory_iterations = std::min<IterationCount>(16, iters);
@ -338,8 +351,7 @@ class BenchmarkRunner {
// OK, now actually report. // OK, now actually report.
BenchmarkReporter::Run report = BenchmarkReporter::Run report =
CreateRunReport(b, i.results, memory_iterations, memory_result, CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, repetition_index);
i.seconds, repetition_index);
if (!report.error_occurred && b.complexity != oNone) if (!report.error_occurred && b.complexity != oNone)
complexity_reports.push_back(report); complexity_reports.push_back(report);
@ -350,9 +362,9 @@ class BenchmarkRunner {
} // end namespace } // end namespace
RunResults RunBenchmark( RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
const benchmark::internal::BenchmarkInstance& b, std::vector<BenchmarkReporter::Run> *complexity_reports)
std::vector<BenchmarkReporter::Run>* complexity_reports) { {
internal::BenchmarkRunner r(b, complexity_reports); internal::BenchmarkRunner r(b, complexity_reports);
return r.get_results(); return r.get_results();
} }
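
To make the iteration-growth rule in PredictNumItersNeeded easier to follow, here is a standalone sketch of the same arithmetic with illustrative numbers (a re-implementation for illustration only, not the library code):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Mirrors the multiplier logic shown above: aim ~40% past min_time, cap the
// growth at 10x while the sample is not yet significant, and always grow by
// at least one iteration. (The real code additionally clamps the result to
// kMaxIterations.)
static int64_t PredictItersSketch(double min_time, double seconds, int64_t iters) {
  double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
  bool is_significant = (seconds / min_time) > 0.1;
  multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
  if (multiplier <= 1.0) multiplier = 2.0;
  return static_cast<int64_t>(std::lround(
      std::max(multiplier * static_cast<double>(iters),
               static_cast<double>(iters) + 1.0)));
}

int main() {
  // A 0.5 s target that took 0.1 s for 1000 iterations suggests 7000 next.
  std::cout << PredictItersSketch(0.5, 0.1, 1000) << "\n";
  return 0;
}
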
@ -26,13 +26,16 @@ DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only); DECLARE_bool(benchmark_display_aggregates_only);
namespace benchmark { namespace benchmark
{
namespace internal { namespace internal
{
extern MemoryManager* memory_manager; extern MemoryManager *memory_manager;
struct RunResults { struct RunResults
{
std::vector<BenchmarkReporter::Run> non_aggregates; std::vector<BenchmarkReporter::Run> non_aggregates;
std::vector<BenchmarkReporter::Run> aggregates_only; std::vector<BenchmarkReporter::Run> aggregates_only;
@ -40,9 +43,8 @@ struct RunResults {
bool file_report_aggregates_only = false; bool file_report_aggregates_only = false;
}; };
RunResults RunBenchmark( RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
const benchmark::internal::BenchmarkInstance& b, std::vector<BenchmarkReporter::Run> *complexity_reports);
std::vector<BenchmarkReporter::Run>* complexity_reports);
} // namespace internal } // namespace internal
@ -8,44 +8,52 @@
#include "internal_macros.h" #include "internal_macros.h"
#include "log.h" #include "log.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
typedef void(AbortHandlerT)(); typedef void(AbortHandlerT)();
inline AbortHandlerT*& GetAbortHandler() { inline AbortHandlerT *&GetAbortHandler()
static AbortHandlerT* handler = &std::abort; {
static AbortHandlerT *handler = &std::abort;
return handler; return handler;
} }
BENCHMARK_NORETURN inline void CallAbortHandler() { BENCHMARK_NORETURN inline void CallAbortHandler()
{
GetAbortHandler()(); GetAbortHandler()();
std::abort(); // fallback to enforce noreturn std::abort(); // fallback to enforce noreturn
} }
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler // CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed. // will log information about the failures and abort when it is destructed.
class CheckHandler { class CheckHandler
{
public: public:
CheckHandler(const char* check, const char* file, const char* func, int line) CheckHandler(const char *check, const char *file, const char *func, int line) : log_(GetErrorLogInstance())
: log_(GetErrorLogInstance()) { {
log_ << file << ":" << line << ": " << func << ": Check `" << check log_ << file << ":" << line << ": " << func << ": Check `" << check << "' failed. ";
<< "' failed. ";
} }
LogType& GetLog() { return log_; } LogType &GetLog()
{
return log_;
}
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false)
{
log_ << std::endl; log_ << std::endl;
CallAbortHandler(); CallAbortHandler();
} }
CheckHandler& operator=(const CheckHandler&) = delete; CheckHandler &operator=(const CheckHandler &) = delete;
CheckHandler(const CheckHandler&) = delete; CheckHandler(const CheckHandler &) = delete;
CheckHandler() = delete; CheckHandler() = delete;
private: private:
LogType& log_; LogType &log_;
}; };
} // end namespace internal } // end namespace internal
@ -56,8 +64,7 @@ class CheckHandler {
#ifndef NDEBUG #ifndef NDEBUG
#define CHECK(b) \ #define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \ (b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__).GetLog())
.GetLog())
#else #else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance() #define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif #endif
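
A small sketch of how these macros are meant to be used inside the library (the helper below is hypothetical and assumes this internal check.h is on the include path):

#include "check.h"  // the library-internal header shown above

// A failed CHECK_* streams into CheckHandler::GetLog() and aborts when the
// handler is destroyed; with NDEBUG defined, CHECK expands to the null log
// and the condition is not evaluated at all.
static double MeanOrDie(const double* values, int count) {
  CHECK_GT(count, 0) << "need at least one sample";
  double sum = 0.0;
  for (int i = 0; i < count; ++i) sum += values[i];
  return sum / count;
}
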
@ -25,23 +25,27 @@
#include "internal_macros.h" #include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
#include <windows.h>
#include <io.h> #include <io.h>
#include <windows.h>
#else #else
#include <unistd.h> #include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS #endif // BENCHMARK_OS_WINDOWS
namespace benchmark { namespace benchmark
namespace { {
namespace
{
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode; typedef WORD PlatformColorCode;
#else #else
typedef const char* PlatformColorCode; typedef const char *PlatformColorCode;
#endif #endif
PlatformColorCode GetPlatformColorCode(LogColor color) { PlatformColorCode GetPlatformColorCode(LogColor color)
{
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
switch (color) { switch (color)
{
case COLOR_RED: case COLOR_RED:
return FOREGROUND_RED; return FOREGROUND_RED;
case COLOR_GREEN: case COLOR_GREEN:
@ -59,7 +63,8 @@ PlatformColorCode GetPlatformColorCode(LogColor color) {
return 0; return 0;
} }
#else #else
switch (color) { switch (color)
{
case COLOR_RED: case COLOR_RED:
return "1"; return "1";
case COLOR_GREEN: case COLOR_GREEN:
@ -82,7 +87,8 @@ PlatformColorCode GetPlatformColorCode(LogColor color) {
} // end namespace } // end namespace
std::string FormatString(const char* msg, va_list args) { std::string FormatString(const char *msg, va_list args)
{
// we might need a second shot at this, so pre-emptively make a copy // we might need a second shot at this, so pre-emptively make a copy
va_list args_cp; va_list args_cp;
va_copy(args_cp, args); va_copy(args_cp, args);
@ -100,7 +106,8 @@ std::string FormatString(const char* msg, va_list args) {
return {}; return {};
else if (static_cast<size_t>(ret) < size) else if (static_cast<size_t>(ret) < size)
return local_buff; return local_buff;
else { else
{
// we did not provide a long enough buffer on our first attempt. // we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]); std::unique_ptr<char[]> buff(new char[size]);
@ -110,7 +117,8 @@ std::string FormatString(const char* msg, va_list args) {
} }
} }
std::string FormatString(const char* msg, ...) { std::string FormatString(const char *msg, ...)
{
va_list args; va_list args;
va_start(args, msg); va_start(args, msg);
auto tmp = FormatString(msg, args); auto tmp = FormatString(msg, args);
@ -118,15 +126,16 @@ std::string FormatString(const char* msg, ...) {
return tmp; return tmp;
} }
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) { void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...)
{
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
ColorPrintf(out, color, fmt, args); ColorPrintf(out, color, fmt, args);
va_end(args); va_end(args);
} }
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args)
va_list args) { {
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning ((void)out); // suppress unused warning
@ -141,21 +150,22 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
// SetConsoleTextAttribute call lest it affect the text that is already // SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console. // printed but has not yet reached the console.
fflush(stdout); fflush(stdout);
SetConsoleTextAttribute(stdout_handle, SetConsoleTextAttribute(stdout_handle, GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args); vprintf(fmt, args);
fflush(stdout); fflush(stdout);
// Restores the text color. // Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs); SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else #else
const char* color_code = GetPlatformColorCode(color); const char *color_code = GetPlatformColorCode(color);
if (color_code) out << FormatString("\033[0;3%sm", color_code); if (color_code)
out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m"; out << FormatString(fmt, args) << "\033[m";
#endif #endif
} }
bool IsColorTerminal() { bool IsColorTerminal()
{
#if BENCHMARK_OS_WINDOWS #if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the // On Windows the TERM variable is usually not set, but the
// console there does support colors. // console there does support colors.
@ -164,18 +174,18 @@ bool IsColorTerminal() {
// On non-Windows platforms, we rely on the TERM variable. This list of // On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test: // supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>. // <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char* const SUPPORTED_TERM_VALUES[] = { const char *const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color", "xterm", "xterm-color", "xterm-256color", "screen", "screen-256color", "tmux",
"screen", "screen-256color", "tmux", "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", "linux", "cygwin",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
"linux", "cygwin",
}; };
const char* const term = getenv("TERM"); const char *const term = getenv("TERM");
bool term_supports_color = false; bool term_supports_color = false;
for (const char* candidate : SUPPORTED_TERM_VALUES) { for (const char *candidate : SUPPORTED_TERM_VALUES)
if (term && 0 == strcmp(term, candidate)) { {
if (term && 0 == strcmp(term, candidate))
{
term_supports_color = true; term_supports_color = true;
break; break;
} }
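
On the non-Windows path, ColorPrintf boils down to wrapping the text in an ANSI SGR escape sequence; a tiny standalone illustration of that same pattern (independent of the library, with a standard ANSI color code hard-coded for the example):

#include <cstdio>

int main() {
  // "\033[0;32m" selects green (standard ANSI); the non-Windows branch above
  // builds the same "\033[0;3%sm" sequence from GetPlatformColorCode(), and
  // "\033[m" resets the attributes afterwards, just as ColorPrintf does.
  std::printf("\033[0;32mcolored\033[m plain again\n");
  return 0;
}
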
@ -5,8 +5,10 @@
#include <iostream> #include <iostream>
#include <string> #include <string>
namespace benchmark { namespace benchmark
enum LogColor { {
enum LogColor
{
COLOR_DEFAULT, COLOR_DEFAULT,
COLOR_RED, COLOR_RED,
COLOR_GREEN, COLOR_GREEN,
@ -17,12 +19,11 @@ enum LogColor {
COLOR_WHITE COLOR_WHITE
}; };
std::string FormatString(const char* msg, va_list args); std::string FormatString(const char *msg, va_list args);
std::string FormatString(const char* msg, ...); std::string FormatString(const char *msg, ...);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args);
va_list args); void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
// Returns true if stdout appears to be a terminal that supports colored // Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise. // output, false otherwise.
@ -21,19 +21,23 @@
#include <iostream> #include <iostream>
#include <limits> #include <limits>
namespace benchmark { namespace benchmark
namespace { {
namespace
{
// Parses 'str' for a 32-bit signed integer. If successful, writes // Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value // the result to *value and returns true; otherwise leaves *value
// unchanged and returns false. // unchanged and returns false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) { bool ParseInt32(const std::string &src_text, const char *str, int32_t *value)
{
// Parses the environment variable as a decimal integer. // Parses the environment variable as a decimal integer.
char* end = nullptr; char *end = nullptr;
const long long_value = strtol(str, &end, 10); // NOLINT const long long_value = strtol(str, &end, 10); // NOLINT
// Has strtol() consumed all characters in the string? // Has strtol() consumed all characters in the string?
if (*end != '\0') { if (*end != '\0')
{
// No - an invalid character was encountered. // No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a 32-bit integer, " std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\".\n"; << "but actually has value \"" << str << "\".\n";
@ -42,13 +46,13 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Is the parsed value in the range of an Int32? // Is the parsed value in the range of an Int32?
const int32_t result = static_cast<int32_t>(long_value); const int32_t result = static_cast<int32_t>(long_value);
if (long_value == std::numeric_limits<long>::max() || if (long_value == std::numeric_limits<long>::max() || long_value == std::numeric_limits<long>::min() ||
long_value == std::numeric_limits<long>::min() ||
// The parsed value overflows as a long. (strtol() returns // The parsed value overflows as a long. (strtol() returns
// LONG_MAX or LONG_MIN when the input overflows.) // LONG_MAX or LONG_MIN when the input overflows.)
result != long_value result != long_value
// The parsed value overflows as an Int32. // The parsed value overflows as an Int32.
) { )
{
std::cerr << src_text << " is expected to be a 32-bit integer, " std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", " << "but actually has value \"" << str << "\", "
<< "which overflows.\n"; << "which overflows.\n";
@ -61,13 +65,15 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Parses 'str' for a double. If successful, writes the result to *value and // Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false. // returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string& src_text, const char* str, double* value) { bool ParseDouble(const std::string &src_text, const char *str, double *value)
{
// Parses the environment variable as a double. // Parses the environment variable as a double.
char* end = nullptr; char *end = nullptr;
const double double_value = strtod(str, &end); // NOLINT const double double_value = strtod(str, &end); // NOLINT
// Has strtod() consumed all characters in the string? // Has strtod() consumed all characters in the string?
if (*end != '\0') { if (*end != '\0')
{
// No - an invalid character was encountered. // No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a double, " std::cerr << src_text << " is expected to be a double, "
<< "but actually has value \"" << str << "\".\n"; << "but actually has value \"" << str << "\".\n";
@ -81,7 +87,8 @@ bool ParseDouble(const std::string& src_text, const char* str, double* value) {
// Returns the name of the environment variable corresponding to the // Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return // given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version. // "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char* flag) { static std::string FlagToEnvVar(const char *flag)
{
const std::string flag_str(flag); const std::string flag_str(flag);
std::string env_var; std::string env_var;
@ -93,39 +100,41 @@ static std::string FlagToEnvVar(const char* flag) {
} // namespace } // namespace
bool BoolFromEnv(const char* flag, bool default_val) { bool BoolFromEnv(const char *flag, bool default_val)
{
const std::string env_var = FlagToEnvVar(flag); const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str()); const char *const value_str = getenv(env_var.c_str());
return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str); return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
} }
int32_t Int32FromEnv(const char* flag, int32_t default_val) { int32_t Int32FromEnv(const char *flag, int32_t default_val)
{
const std::string env_var = FlagToEnvVar(flag); const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str()); const char *const value_str = getenv(env_var.c_str());
int32_t value = default_val; int32_t value = default_val;
if (value_str == nullptr || if (value_str == nullptr || !ParseInt32(std::string("Environment variable ") + env_var, value_str, &value))
!ParseInt32(std::string("Environment variable ") + env_var, value_str, {
&value)) {
return default_val; return default_val;
} }
return value; return value;
} }
double DoubleFromEnv(const char* flag, double default_val) { double DoubleFromEnv(const char *flag, double default_val)
{
const std::string env_var = FlagToEnvVar(flag); const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str()); const char *const value_str = getenv(env_var.c_str());
double value = default_val; double value = default_val;
if (value_str == nullptr || if (value_str == nullptr || !ParseDouble(std::string("Environment variable ") + env_var, value_str, &value))
!ParseDouble(std::string("Environment variable ") + env_var, value_str, {
&value)) {
return default_val; return default_val;
} }
return value; return value;
} }
const char* StringFromEnv(const char* flag, const char* default_val) { const char *StringFromEnv(const char *flag, const char *default_val)
{
const std::string env_var = FlagToEnvVar(flag); const std::string env_var = FlagToEnvVar(flag);
const char* const value = getenv(env_var.c_str()); const char *const value = getenv(env_var.c_str());
return value == nullptr ? default_val : value; return value == nullptr ? default_val : value;
} }
@ -134,94 +143,108 @@ const char* StringFromEnv(const char* flag, const char* default_val) {
// part can be omitted. // part can be omitted.
// //
// Returns the value of the flag, or nullptr if the parsing failed. // Returns the value of the flag, or nullptr if the parsing failed.
const char* ParseFlagValue(const char* str, const char* flag, const char *ParseFlagValue(const char *str, const char *flag, bool def_optional)
bool def_optional) { {
// str and flag must not be nullptr. // str and flag must not be nullptr.
if (str == nullptr || flag == nullptr) return nullptr; if (str == nullptr || flag == nullptr)
return nullptr;
// The flag must start with "--". // The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag); const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length(); const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; if (strncmp(str, flag_str.c_str(), flag_len) != 0)
return nullptr;
// Skips the flag name. // Skips the flag name.
const char* flag_end = str + flag_len; const char *flag_end = str + flag_len;
// When def_optional is true, it's OK to not have a "=value" part. // When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0')) return flag_end; if (def_optional && (flag_end[0] == '\0'))
return flag_end;
// If def_optional is true and there are more characters after the // If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after // flag name, or if def_optional is false, there must be a '=' after
// the flag name. // the flag name.
if (flag_end[0] != '=') return nullptr; if (flag_end[0] != '=')
return nullptr;
// Returns the string after "=". // Returns the string after "=".
return flag_end + 1; return flag_end + 1;
} }
bool ParseBoolFlag(const char* str, const char* flag, bool* value) { bool ParseBoolFlag(const char *str, const char *flag, bool *value)
{
// Gets the value of the flag as a string. // Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, true); const char *const value_str = ParseFlagValue(str, flag, true);
// Aborts if the parsing failed. // Aborts if the parsing failed.
if (value_str == nullptr) return false; if (value_str == nullptr)
return false;
// Converts the string value to a bool. // Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str); *value = IsTruthyFlagValue(value_str);
return true; return true;
} }
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { bool ParseInt32Flag(const char *str, const char *flag, int32_t *value)
{
// Gets the value of the flag as a string. // Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false); const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed. // Aborts if the parsing failed.
if (value_str == nullptr) return false; if (value_str == nullptr)
return false;
// Sets *value to the value of the flag. // Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str, return ParseInt32(std::string("The value of flag --") + flag, value_str, value);
value);
} }
bool ParseDoubleFlag(const char* str, const char* flag, double* value) { bool ParseDoubleFlag(const char *str, const char *flag, double *value)
{
// Gets the value of the flag as a string. // Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false); const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed. // Aborts if the parsing failed.
if (value_str == nullptr) return false; if (value_str == nullptr)
return false;
// Sets *value to the value of the flag. // Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str, return ParseDouble(std::string("The value of flag --") + flag, value_str, value);
value);
} }
bool ParseStringFlag(const char* str, const char* flag, std::string* value) { bool ParseStringFlag(const char *str, const char *flag, std::string *value)
{
// Gets the value of the flag as a string. // Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false); const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed. // Aborts if the parsing failed.
if (value_str == nullptr) return false; if (value_str == nullptr)
return false;
*value = value_str; *value = value_str;
return true; return true;
} }
bool IsFlag(const char* str, const char* flag) { bool IsFlag(const char *str, const char *flag)
{
return (ParseFlagValue(str, flag, true) != nullptr); return (ParseFlagValue(str, flag, true) != nullptr);
} }
bool IsTruthyFlagValue(const std::string& value) { bool IsTruthyFlagValue(const std::string &value)
if (value.size() == 1) { {
if (value.size() == 1)
{
char v = value[0]; char v = value[0];
return isalnum(v) && return isalnum(v) && !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
!(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N'); }
} else if (!value.empty()) { else if (!value.empty())
{
std::string value_lower(value); std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(), std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); }); [](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" || return !(value_lower == "false" || value_lower == "no" || value_lower == "off");
value_lower == "off"); }
} else else
return true; return true;
} }
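
To make the truthiness rules above easy to check by eye, here is a standalone sketch that mirrors the documented behaviour of IsTruthyFlagValue (a re-implementation for illustration, not the library function itself):

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Single characters are truthy unless they are '0', 'f', 'F', 'n', 'N' or
// non-alphanumeric; longer strings are truthy unless they equal "false",
// "no" or "off" (case-insensitive); the empty string counts as truthy.
static bool TruthySketch(const std::string& value) {
  if (value.size() == 1) {
    char v = value[0];
    return std::isalnum(static_cast<unsigned char>(v)) &&
           !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
  }
  if (value.empty()) return true;
  std::string lower(value);
  std::transform(lower.begin(), lower.end(), lower.begin(),
                 [](char c) { return static_cast<char>(::tolower(c)); });
  return !(lower == "false" || lower == "no" || lower == "off");
}

int main() {
  // Prints 101: "1" is truthy, "off" is falsy, "" is truthy.
  std::cout << TruthySketch("1") << TruthySketch("off") << TruthySketch("") << "\n";
  return 0;
}
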
@ -14,48 +14,41 @@
#define DECLARE_string(name) extern std::string FLAG(name) #define DECLARE_string(name) extern std::string FLAG(name)
// Macros for defining flags. // Macros for defining flags.
#define DEFINE_bool(name, default_val) \ #define DEFINE_bool(name, default_val) bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
bool FLAG(name) = \ #define DEFINE_int32(name, default_val) int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
benchmark::BoolFromEnv(#name, default_val) #define DEFINE_double(name, default_val) double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) \ #define DEFINE_string(name, default_val) std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
int32_t FLAG(name) = \
benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) \
double FLAG(name) = \
benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) \
std::string FLAG(name) = \
benchmark::StringFromEnv(#name, default_val)
namespace benchmark { namespace benchmark
{
// Parses a bool from the environment variable // Parses a bool from the environment variable
// corresponding to the given flag. // corresponding to the given flag.
// //
// If the variable exists, returns IsTruthyFlagValue() value; if not, // If the variable exists, returns IsTruthyFlagValue() value; if not,
// returns the given default value. // returns the given default value.
bool BoolFromEnv(const char* flag, bool default_val); bool BoolFromEnv(const char *flag, bool default_val);
// Parses an Int32 from the environment variable // Parses an Int32 from the environment variable
// corresponding to the given flag. // corresponding to the given flag.
// //
// If the variable exists, returns ParseInt32() value; if not, returns // If the variable exists, returns ParseInt32() value; if not, returns
// the given default value. // the given default value.
int32_t Int32FromEnv(const char* flag, int32_t default_val); int32_t Int32FromEnv(const char *flag, int32_t default_val);
// Parses a Double from the environment variable // Parses a Double from the environment variable
// corresponding to the given flag. // corresponding to the given flag.
// //
// If the variable exists, returns ParseDouble(); if not, returns // If the variable exists, returns ParseDouble(); if not, returns
// the given default value. // the given default value.
double DoubleFromEnv(const char* flag, double default_val); double DoubleFromEnv(const char *flag, double default_val);
// Parses a string from the environment variable // Parses a string from the environment variable
// corresponding to the given flag. // corresponding to the given flag.
// //
// If variable exists, returns its value; if not, returns // If variable exists, returns its value; if not, returns
// the given default value. // the given default value.
const char* StringFromEnv(const char* flag, const char* default_val); const char *StringFromEnv(const char *flag, const char *default_val);
// Parses a string for a bool flag, in the form of either // Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag". // "--flag=value" or "--flag".
@ -66,37 +59,37 @@ const char* StringFromEnv(const char* flag, const char* default_val);
// //
// On success, stores the value of the flag in *value, and returns // On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value. // true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char* str, const char* flag, bool* value); bool ParseBoolFlag(const char *str, const char *flag, bool *value);
// Parses a string for an Int32 flag, in the form of // Parses a string for an Int32 flag, in the form of
// "--flag=value". // "--flag=value".
// //
// On success, stores the value of the flag in *value, and returns // On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value. // true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); bool ParseInt32Flag(const char *str, const char *flag, int32_t *value);
// Parses a string for a Double flag, in the form of // Parses a string for a Double flag, in the form of
// "--flag=value". // "--flag=value".
// //
// On success, stores the value of the flag in *value, and returns // On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value. // true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value); bool ParseDoubleFlag(const char *str, const char *flag, double *value);
// Parses a string for a string flag, in the form of // Parses a string for a string flag, in the form of
// "--flag=value". // "--flag=value".
// //
// On success, stores the value of the flag in *value, and returns // On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value. // true. On failure, returns false without changing *value.
bool ParseStringFlag(const char* str, const char* flag, std::string* value); bool ParseStringFlag(const char *str, const char *flag, std::string *value);
// Returns true if the string matches the flag. // Returns true if the string matches the flag.
bool IsFlag(const char* str, const char* flag); bool IsFlag(const char *str, const char *flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. Also returns false if the value matches // some non-alphanumeric character. Also returns false if the value matches
// one of 'no', 'false', 'off' (case-insensitive). As a special case, also // one of 'no', 'false', 'off' (case-insensitive). As a special case, also
// returns true if value is the empty string. // returns true if value is the empty string.
bool IsTruthyFlagValue(const std::string& value); bool IsTruthyFlagValue(const std::string &value);
} // end namespace benchmark } // end namespace benchmark
@ -17,17 +17,20 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include "check.h" #include "check.h"
#include "complexity.h" #include "complexity.h"
#include <algorithm>
#include <cmath>
namespace benchmark { namespace benchmark
{
// Internal function to calculate the different scalability forms // Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) { BigOFunc *FittingCurve(BigO complexity)
{
static const double kLog2E = 1.44269504088896340736; static const double kLog2E = 1.44269504088896340736;
switch (complexity) { switch (complexity)
{
case oN: case oN:
return [](IterationCount n) -> double { return static_cast<double>(n); }; return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared: case oNSquared:
@ -36,13 +39,10 @@ BigOFunc* FittingCurve(BigO complexity) {
return [](IterationCount n) -> double { return std::pow(n, 3); }; return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN: case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */ /* Note: can't use log2 because Android's GNU STL lacks it */
return return [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
[](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN: case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */ /* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) { return [](IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); };
return kLog2E * n * log(static_cast<double>(n));
};
case o1: case o1:
default: default:
return [](IterationCount) { return 1.0; }; return [](IterationCount) { return 1.0; };
@ -50,8 +50,10 @@ BigOFunc* FittingCurve(BigO complexity) {
} }
// Function to return a string for the calculated complexity // Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity) { std::string GetBigOString(BigO complexity)
switch (complexity) { {
switch (complexity)
{
case oN: case oN:
return "N"; return "N";
case oNSquared: case oNSquared:
@ -79,16 +81,16 @@ std::string GetBigOString(BigO complexity) {
// For a deeper explanation on the algorithm logic, please refer to // For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics // https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n, LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, BigOFunc *fitting_curve)
const std::vector<double>& time, {
BigOFunc* fitting_curve) {
double sigma_gn = 0.0; double sigma_gn = 0.0;
double sigma_gn_squared = 0.0; double sigma_gn_squared = 0.0;
double sigma_time = 0.0; double sigma_time = 0.0;
double sigma_time_gn = 0.0; double sigma_time_gn = 0.0;
// Calculate least square fitting parameter // Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) { for (size_t i = 0; i < n.size(); ++i)
{
double gn_i = fitting_curve(n[i]); double gn_i = fitting_curve(n[i]);
sigma_gn += gn_i; sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i; sigma_gn_squared += gn_i * gn_i;
@ -104,7 +106,8 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// Calculate RMS // Calculate RMS
double rms = 0.0; double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) { for (size_t i = 0; i < n.size(); ++i)
{
double fit = result.coef * fitting_curve(n[i]); double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2); rms += pow((time[i] - fit), 2);
} }
@ -123,8 +126,8 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// - complexity : If different than oAuto, the fitting curve will stick to // - complexity : If different than oAuto, the fitting curve will stick to
// this one. If it is oAuto, the best fitting curve will be // this one. If it is oAuto, the best fitting curve will be
// calculated. // calculated.
LeastSq MinimalLeastSq(const std::vector<int64_t>& n, LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, const BigO complexity)
const std::vector<double>& time, const BigO complexity) { {
CHECK_EQ(n.size(), time.size()); CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2); // Do not compute fitting curve if fewer than two CHECK_GE(n.size(), 2); // Do not compute fitting curve if fewer than two
// benchmark runs are given // benchmark runs are given
@ -132,7 +135,8 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
LeastSq best_fit; LeastSq best_fit;
if (complexity == oAuto) { if (complexity == oAuto)
{
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed}; std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve // Take o1 as default best fitting curve
@ -140,14 +144,18 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
best_fit.complexity = o1; best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one // Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) { for (const auto &fit : fit_curves)
{
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit)); LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms) { if (current_fit.rms < best_fit.rms)
{
best_fit = current_fit; best_fit = current_fit;
best_fit.complexity = fit; best_fit.complexity = fit;
} }
} }
} else { }
else
{
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity)); best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity; best_fit.complexity = complexity;
} }
@ -155,12 +163,13 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
return best_fit; return best_fit;
} }
std::vector<BenchmarkReporter::Run> ComputeBigO( std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports)
const std::vector<BenchmarkReporter::Run>& reports) { {
typedef BenchmarkReporter::Run Run; typedef BenchmarkReporter::Run Run;
std::vector<Run> results; std::vector<Run> results;
if (reports.size() < 2) return results; if (reports.size() < 2)
return results;
// Accumulators. // Accumulators.
std::vector<int64_t> n; std::vector<int64_t> n;
@ -168,7 +177,8 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
std::vector<double> cpu_time; std::vector<double> cpu_time;
// Populate the accumulators. // Populate the accumulators.
for (const Run& run : reports) { for (const Run &run : reports)
{
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?"; CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n); n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations); real_time.push_back(run.real_accumulated_time / run.iterations);
@@ -178,10 +188,13 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
LeastSq result_cpu; LeastSq result_cpu;
LeastSq result_real; LeastSq result_real;
if (reports[0].complexity == oLambda) { if (reports[0].complexity == oLambda)
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda); result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda); result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else { }
else
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
} }
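For context on how the fitting code above is driven in practice, here is a minimal, hedged sketch of a benchmark that opts into asymptotic-complexity reporting; the benchmark name, ranges, and workload are illustrative assumptions, not part of this diff:

#include <benchmark/benchmark.h>
#include <algorithm>
#include <cstdlib>
#include <vector>

// Illustrative workload: sort N elements and report an O() fit.
static void BM_SortVector(benchmark::State& state) {
  const int64_t n = state.range(0);
  std::vector<int> v(n);
  for (auto _ : state) {
    state.PauseTiming();
    std::generate(v.begin(), v.end(), std::rand);
    state.ResumeTiming();
    std::sort(v.begin(), v.end());
  }
  // Feeds Run::complexity_n, which ComputeBigO()/MinimalLeastSq() consume.
  state.SetComplexityN(n);
}
// oAuto lets the least-squares code pick the best fitting curve; a fixed
// curve (e.g. benchmark::oNLogN) or a user-supplied lambda (oLambda) can be passed instead.
BENCHMARK(BM_SortVector)->RangeMultiplier(4)->Range(1 << 10, 1 << 18)->Complexity(benchmark::oAuto);
BENCHMARK_MAIN();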
@@ -23,12 +23,12 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
{
// Return a vector containing the bigO and RMS information for the specified // Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned. // list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO( std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports);
const std::vector<BenchmarkReporter::Run>& reports);
// This data structure will contain the result returned by MinimalLeastSq // This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coefficient for the high-order term as // - coef : Estimated coefficient for the high-order term as
@@ -39,8 +39,11 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// the same value. In case BigO::oAuto has been selected, this // the same value. In case BigO::oAuto has been selected, this
// parameter will return the best fitting curve detected. // parameter will return the best fitting curve detected.
struct LeastSq { struct LeastSq
LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {} {
LeastSq() : coef(0.0), rms(0.0), complexity(oNone)
{
}
double coef; double coef;
double rms; double rms;
@@ -31,9 +31,11 @@
#include "string_util.h" #include "string_util.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
{
bool ConsoleReporter::ReportContext(const Context& context) { bool ConsoleReporter::ReportContext(const Context &context)
{
name_field_width_ = context.name_field_width; name_field_width_ = context.name_field_width;
printed_header_ = false; printed_header_ = false;
prev_counters_.clear(); prev_counters_.clear();
@@ -41,26 +43,32 @@ bool ConsoleReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context); PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream())
GetErrorStream() {
<< "Color printing is only supported for stdout on windows." GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n"; " Disabling color printing\n";
output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
} }
#endif #endif
return true; return true;
} }
void ConsoleReporter::PrintHeader(const Run& run) { void ConsoleReporter::PrintHeader(const Run &run)
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), {
"Benchmark", "Time", "CPU", "Iterations"); std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), "Benchmark", "Time",
if(!run.counters.empty()) { "CPU", "Iterations");
if(output_options_ & OO_Tabular) { if (!run.counters.empty())
for(auto const& c : run.counters) { {
if (output_options_ & OO_Tabular)
{
for (auto const &c : run.counters)
{
str += FormatString(" %10s", c.first.c_str()); str += FormatString(" %10s", c.first.c_str());
} }
} else { }
else
{
str += " UserCounters..."; str += " UserCounters...";
} }
} }
@@ -68,16 +76,18 @@ void ConsoleReporter::PrintHeader(const Run& run) {
GetOutputStream() << line << "\n" << str << "\n" << line << "\n"; GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
} }
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) { void ConsoleReporter::ReportRuns(const std::vector<Run> &reports)
for (const auto& run : reports) { {
for (const auto &run : reports)
{
// print the header: // print the header:
// --- if none was printed yet // --- if none was printed yet
bool print_header = !printed_header_; bool print_header = !printed_header_;
// --- or if the format is tabular and this run // --- or if the format is tabular and this run
// has different fields from the prev header // has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) && print_header |= (output_options_ & OO_Tabular) && (!internal::SameNames(run.counters, prev_counters_));
(!internal::SameNames(run.counters, prev_counters_)); if (print_header)
if (print_header) { {
printed_header_ = true; printed_header_ = true;
prev_counters_ = run.counters; prev_counters_ = run.counters;
PrintHeader(run); PrintHeader(run);
@@ -89,42 +99,43 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
} }
} }
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, static void IgnoreColorPrint(std::ostream &out, LogColor, const char *fmt, ...)
...) { {
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
out << FormatString(fmt, args); out << FormatString(fmt, args);
va_end(args); va_end(args);
} }
static std::string FormatTime(double time)
static std::string FormatTime(double time) { {
// Align decimal places... // Align decimal places...
if (time < 1.0) { if (time < 1.0)
{
return FormatString("%10.3f", time); return FormatString("%10.3f", time);
} }
if (time < 10.0) { if (time < 10.0)
{
return FormatString("%10.2f", time); return FormatString("%10.2f", time);
} }
if (time < 100.0) { if (time < 100.0)
{
return FormatString("%10.1f", time); return FormatString("%10.1f", time);
} }
return FormatString("%10.0f", time); return FormatString("%10.0f", time);
} }
void ConsoleReporter::PrintRunData(const Run& result) { void ConsoleReporter::PrintRunData(const Run &result)
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); {
auto& Out = GetOutputStream(); typedef void(PrinterFn)(std::ostream &, LogColor, const char *, ...);
PrinterFn* printer = (output_options_ & OO_Color) ? auto &Out = GetOutputStream();
(PrinterFn*)ColorPrintf : IgnoreColorPrint; PrinterFn *printer = (output_options_ & OO_Color) ? (PrinterFn *)ColorPrintf : IgnoreColorPrint;
auto name_color = auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN; printer(Out, name_color, "%-*s ", name_field_width_, result.benchmark_name().c_str());
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name().c_str());
if (result.error_occurred) { if (result.error_occurred)
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", {
result.error_message.c_str()); printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n"); printer(Out, COLOR_DEFAULT, "\n");
return; return;
} }
@@ -134,40 +145,46 @@ void ConsoleReporter::PrintRunData(const Run& result) {
const std::string real_time_str = FormatTime(real_time); const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time); const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o)
if (result.report_big_o) { {
std::string big_o = GetBigOString(result.complexity); std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), cpu_time, big_o.c_str());
cpu_time, big_o.c_str()); }
} else if (result.report_rms) { else if (result.report_rms)
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", {
cpu_time * 100, "%"); printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", cpu_time * 100, "%");
} else { }
const char* timeLabel = GetTimeUnitString(result.time_unit); else
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, {
cpu_time_str.c_str(), timeLabel); const char *timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, cpu_time_str.c_str(),
timeLabel);
} }
if (!result.report_big_o && !result.report_rms) { if (!result.report_big_o && !result.report_rms)
{
printer(Out, COLOR_CYAN, "%10lld", result.iterations); printer(Out, COLOR_CYAN, "%10lld", result.iterations);
} }
for (auto& c : result.counters) { for (auto &c : result.counters)
const std::size_t cNameLen = std::max(std::string::size_type(10), {
c.first.length()); const std::size_t cNameLen = std::max(std::string::size_type(10), c.first.length());
auto const& s = HumanReadableNumber(c.second.value, c.second.oneK); auto const &s = HumanReadableNumber(c.second.value, c.second.oneK);
const char* unit = ""; const char *unit = "";
if (c.second.flags & Counter::kIsRate) if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s"; unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular) { if (output_options_ & OO_Tabular)
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), {
unit); printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), unit);
} else { }
else
{
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit); printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
} }
} }
if (!result.report_label.empty()) { if (!result.report_label.empty())
{
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
} }
@@ -14,62 +14,80 @@
#include "counter.h" #include "counter.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
double Finish(Counter const& c, IterationCount iterations, double cpu_time, double Finish(Counter const &c, IterationCount iterations, double cpu_time, double num_threads)
double num_threads) { {
double v = c.value; double v = c.value;
if (c.flags & Counter::kIsRate) { if (c.flags & Counter::kIsRate)
{
v /= cpu_time; v /= cpu_time;
} }
if (c.flags & Counter::kAvgThreads) { if (c.flags & Counter::kAvgThreads)
{
v /= num_threads; v /= num_threads;
} }
if (c.flags & Counter::kIsIterationInvariant) { if (c.flags & Counter::kIsIterationInvariant)
{
v *= iterations; v *= iterations;
} }
if (c.flags & Counter::kAvgIterations) { if (c.flags & Counter::kAvgIterations)
{
v /= iterations; v /= iterations;
} }
if (c.flags & Counter::kInvert) { // Invert is *always* last. if (c.flags & Counter::kInvert)
{ // Invert is *always* last.
v = 1.0 / v; v = 1.0 / v;
} }
return v; return v;
} }
void Finish(UserCounters* l, IterationCount iterations, double cpu_time, void Finish(UserCounters *l, IterationCount iterations, double cpu_time, double num_threads)
double num_threads) { {
for (auto& c : *l) { for (auto &c : *l)
{
c.second.value = Finish(c.second, iterations, cpu_time, num_threads); c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
} }
} }
void Increment(UserCounters* l, UserCounters const& r) { void Increment(UserCounters *l, UserCounters const &r)
{
// add counters present in both or just in *l // add counters present in both or just in *l
for (auto& c : *l) { for (auto &c : *l)
{
auto it = r.find(c.first); auto it = r.find(c.first);
if (it != r.end()) { if (it != r.end())
{
c.second.value = c.second + it->second; c.second.value = c.second + it->second;
} }
} }
// add counters present in r, but not in *l // add counters present in r, but not in *l
for (auto const& tc : r) { for (auto const &tc : r)
{
auto it = l->find(tc.first); auto it = l->find(tc.first);
if (it == l->end()) { if (it == l->end())
{
(*l)[tc.first] = tc.second; (*l)[tc.first] = tc.second;
} }
} }
} }
bool SameNames(UserCounters const& l, UserCounters const& r) { bool SameNames(UserCounters const &l, UserCounters const &r)
if (&l == &r) return true; {
if (l.size() != r.size()) { if (&l == &r)
return true;
if (l.size() != r.size())
{
return false; return false;
} }
for (auto const& c : l) { for (auto const &c : l)
if (r.find(c.first) == r.end()) { {
if (r.find(c.first) == r.end())
{
return false; return false;
} }
} }
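As a reference point for the Finish()/Increment() logic above, user counters are typically populated from benchmark code roughly as in the hedged sketch below (counter names and the workload are illustrative). With the console reporter, --benchmark_counters_tabular=true selects the OO_Tabular layout handled earlier in this diff.

#include <benchmark/benchmark.h>

static void BM_Process(benchmark::State& state) {
  int64_t bytes = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(bytes += 4096);  // stand-in for real work
  }
  // kIsRate divides by cpu_time in Finish(); kAvgThreads divides by num_threads.
  state.counters["bytes"] =
      benchmark::Counter(static_cast<double>(bytes), benchmark::Counter::kIsRate);
  state.counters["per_thread"] = benchmark::Counter(1.0, benchmark::Counter::kAvgThreads);
}
BENCHMARK(BM_Process)->Threads(2);
BENCHMARK_MAIN();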
@@ -17,14 +17,15 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
{
// these counter-related functions are hidden to reduce API surface. // these counter-related functions are hidden to reduce API surface.
namespace internal { namespace internal
void Finish(UserCounters* l, IterationCount iterations, double time, {
double num_threads); void Finish(UserCounters *l, IterationCount iterations, double time, double num_threads);
void Increment(UserCounters* l, UserCounters const& r); void Increment(UserCounters *l, UserCounters const &r);
bool SameNames(UserCounters const& l, UserCounters const& r); bool SameNames(UserCounters const &l, UserCounters const &r);
} // end namespace internal } // end namespace internal
} // end namespace benchmark } // end namespace benchmark
@@ -28,39 +28,52 @@
// File format reference: http://edoceo.com/utilitas/csv-file-format. // File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark { namespace benchmark
{
namespace { namespace
std::vector<std::string> elements = { {
"name", "iterations", "real_time", "cpu_time", std::vector<std::string> elements = {"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label", "time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"}; "error_occurred", "error_message"};
} // namespace } // namespace
std::string CsvEscape(const std::string & s) { std::string CsvEscape(const std::string &s)
{
std::string tmp; std::string tmp;
tmp.reserve(s.size() + 2); tmp.reserve(s.size() + 2);
for (char c : s) { for (char c : s)
switch (c) { {
case '"' : tmp += "\"\""; break; switch (c)
default : tmp += c; break; {
case '"':
tmp += "\"\"";
break;
default:
tmp += c;
break;
} }
} }
return '"' + tmp + '"'; return '"' + tmp + '"';
} }
bool CSVReporter::ReportContext(const Context& context) { bool CSVReporter::ReportContext(const Context &context)
{
PrintBasicContext(&GetErrorStream(), context); PrintBasicContext(&GetErrorStream(), context);
return true; return true;
} }
void CSVReporter::ReportRuns(const std::vector<Run>& reports) { void CSVReporter::ReportRuns(const std::vector<Run> &reports)
std::ostream& Out = GetOutputStream(); {
std::ostream &Out = GetOutputStream();
if (!printed_header_) { if (!printed_header_)
{
// save the names of all the user counters // save the names of all the user counters
for (const auto& run : reports) { for (const auto &run : reports)
for (const auto& cnt : run.counters) { {
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue; continue;
user_counter_names_.insert(cnt.first); user_counter_names_.insert(cnt.first);
@@ -68,41 +81,49 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
} }
// print the header // print the header
for (auto B = elements.begin(); B != elements.end();) { for (auto B = elements.begin(); B != elements.end();)
{
Out << *B++; Out << *B++;
if (B != elements.end()) Out << ","; if (B != elements.end())
Out << ",";
} }
for (auto B = user_counter_names_.begin(); for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();)
B != user_counter_names_.end();) { {
Out << ",\"" << *B++ << "\""; Out << ",\"" << *B++ << "\"";
} }
Out << "\n"; Out << "\n";
printed_header_ = true; printed_header_ = true;
} else { }
else
{
// check that all the current counters are saved in the name set // check that all the current counters are saved in the name set
for (const auto& run : reports) { for (const auto &run : reports)
for (const auto& cnt : run.counters) { {
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second") if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue; continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. " << "All counters must be present in each run. "
<< "Counter named \"" << cnt.first << "Counter named \"" << cnt.first << "\" was not in a run after being added to the header";
<< "\" was not in a run after being added to the header";
} }
} }
} }
// print results for each run // print results for each run
for (const auto& run : reports) { for (const auto &run : reports)
{
PrintRunData(run); PrintRunData(run);
} }
} }
void CSVReporter::PrintRunData(const Run& run) { void CSVReporter::PrintRunData(const Run &run)
std::ostream& Out = GetOutputStream(); {
std::ostream &Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ","; Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred) { if (run.error_occurred)
{
Out << std::string(elements.size() - 3, ','); Out << std::string(elements.size() - 3, ',');
Out << "true,"; Out << "true,";
Out << CsvEscape(run.error_message) << "\n"; Out << CsvEscape(run.error_message) << "\n";
@@ -110,7 +131,8 @@ void CSVReporter::PrintRunData(const Run& run) {
} }
// Do not print iteration on bigO and RMS report // Do not print iteration on bigO and RMS report
if (!run.report_big_o && !run.report_rms) { if (!run.report_big_o && !run.report_rms)
{
Out << run.iterations; Out << run.iterations;
} }
Out << ","; Out << ",";
@@ -119,32 +141,42 @@ void CSVReporter::PrintRunData(const Run& run) {
Out << run.GetAdjustedCPUTime() << ","; Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on bigO and RMS report // Do not print timeLabel on bigO and RMS report
if (run.report_big_o) { if (run.report_big_o)
{
Out << GetBigOString(run.complexity); Out << GetBigOString(run.complexity);
} else if (!run.report_rms) { }
else if (!run.report_rms)
{
Out << GetTimeUnitString(run.time_unit); Out << GetTimeUnitString(run.time_unit);
} }
Out << ","; Out << ",";
if (run.counters.find("bytes_per_second") != run.counters.end()) { if (run.counters.find("bytes_per_second") != run.counters.end())
{
Out << run.counters.at("bytes_per_second"); Out << run.counters.at("bytes_per_second");
} }
Out << ","; Out << ",";
if (run.counters.find("items_per_second") != run.counters.end()) { if (run.counters.find("items_per_second") != run.counters.end())
{
Out << run.counters.at("items_per_second"); Out << run.counters.at("items_per_second");
} }
Out << ","; Out << ",";
if (!run.report_label.empty()) { if (!run.report_label.empty())
{
Out << CsvEscape(run.report_label); Out << CsvEscape(run.report_label);
} }
Out << ",,"; // for error_occurred and error_message Out << ",,"; // for error_occurred and error_message
// Print user counters // Print user counters
for (const auto& ucn : user_counter_names_) { for (const auto &ucn : user_counter_names_)
{
auto it = run.counters.find(ucn); auto it = run.counters.find(ucn);
if (it == run.counters.end()) { if (it == run.counters.end())
{
Out << ","; Out << ",";
} else { }
else
{
Out << "," << it->second; Out << "," << it->second;
} }
} }
@@ -50,15 +50,18 @@ extern "C" uint64_t __rdtsc();
#include <emscripten.h> #include <emscripten.h>
#endif #endif
namespace benchmark { namespace benchmark
{
// NOTE: only i386 and x86_64 have been well tested. // NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on // PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14 // http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also // with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h // https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock { namespace cycleclock
{
// This should return the number of cycles since power-on. Thread-safe. // This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() { inline BENCHMARK_ALWAYS_INLINE int64_t Now()
{
#if defined(BENCHMARK_OS_MACOSX) #if defined(BENCHMARK_OS_MACOSX)
// this goes at the top because we need ALL Macs, regardless of // this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that // architecture, to return the number of "mach time units" that
@@ -90,8 +93,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
return tb; return tb;
#else #else
uint32_t tbl, tbu0, tbu1; uint32_t tbl, tbu0, tbu1;
asm volatile( asm volatile("mftbu %0\n"
"mftbu %0\n"
"mftbl %1\n" "mftbl %1\n"
"mftbu %2" "mftbu %2"
: "=r"(tbu0), "=r"(tbl), "=r"(tbu1)); : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
@@ -149,9 +151,11 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
uint32_t pmcntenset; uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions. // Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. if (pmuseren & 1)
{ // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting? if (pmcntenset & 0x80000000ul)
{ // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle // The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6 return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
@@ -178,8 +182,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
uint32_t cycles_lo, cycles_hi0, cycles_hi1; uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above. // This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching. // Implemented in assembly because Clang insisted on branching.
asm volatile( asm volatile("rdcycleh %0\n"
"rdcycleh %0\n"
"rdcycle %1\n" "rdcycle %1\n"
"rdcycleh %2\n" "rdcycleh %2\n"
"sub %0, %0, %2\n" "sub %0, %0, %2\n"
@@ -28,53 +28,80 @@
#include "string_util.h" #include "string_util.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
{
namespace { namespace
{
std::string StrEscape(const std::string & s) { std::string StrEscape(const std::string &s)
{
std::string tmp; std::string tmp;
tmp.reserve(s.size()); tmp.reserve(s.size());
for (char c : s) { for (char c : s)
switch (c) { {
case '\b': tmp += "\\b"; break; switch (c)
case '\f': tmp += "\\f"; break; {
case '\n': tmp += "\\n"; break; case '\b':
case '\r': tmp += "\\r"; break; tmp += "\\b";
case '\t': tmp += "\\t"; break; break;
case '\\': tmp += "\\\\"; break; case '\f':
case '"' : tmp += "\\\""; break; tmp += "\\f";
default : tmp += c; break; break;
case '\n':
tmp += "\\n";
break;
case '\r':
tmp += "\\r";
break;
case '\t':
tmp += "\\t";
break;
case '\\':
tmp += "\\\\";
break;
case '"':
tmp += "\\\"";
break;
default:
tmp += c;
break;
} }
} }
return tmp; return tmp;
} }
std::string FormatKV(std::string const& key, std::string const& value) { std::string FormatKV(std::string const &key, std::string const &value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
} }
std::string FormatKV(std::string const& key, const char* value) { std::string FormatKV(std::string const &key, const char *value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str()); return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
} }
std::string FormatKV(std::string const& key, bool value) { std::string FormatKV(std::string const &key, bool value)
{
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false"); return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
} }
std::string FormatKV(std::string const& key, int64_t value) { std::string FormatKV(std::string const &key, int64_t value)
{
std::stringstream ss; std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value; ss << '"' << StrEscape(key) << "\": " << value;
return ss.str(); return ss.str();
} }
std::string FormatKV(std::string const& key, IterationCount value) { std::string FormatKV(std::string const &key, IterationCount value)
{
std::stringstream ss; std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value; ss << '"' << StrEscape(key) << "\": " << value;
return ss.str(); return ss.str();
} }
std::string FormatKV(std::string const& key, double value) { std::string FormatKV(std::string const &key, double value)
{
std::stringstream ss; std::stringstream ss;
ss << '"' << StrEscape(key) << "\": "; ss << '"' << StrEscape(key) << "\": ";
@@ -82,22 +109,25 @@ std::string FormatKV(std::string const& key, double value) {
ss << (value < 0 ? "-" : "") << "NaN"; ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value)) else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity"; ss << (value < 0 ? "-" : "") << "Infinity";
else { else
const auto max_digits10 = {
std::numeric_limits<decltype(value)>::max_digits10; const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1; const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10) ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
<< value;
} }
return ss.str(); return ss.str();
} }
int64_t RoundDouble(double v) { return std::lround(v); } int64_t RoundDouble(double v)
{
return std::lround(v);
}
} // end namespace } // end namespace
bool JSONReporter::ReportContext(const Context& context) { bool JSONReporter::ReportContext(const Context &context)
std::ostream& out = GetOutputStream(); {
std::ostream &out = GetOutputStream();
out << "{\n"; out << "{\n";
std::string inner_indent(2, ' '); std::string inner_indent(2, ' ');
@@ -111,44 +141,40 @@ bool JSONReporter::ReportContext(const Context& context) {
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n"; out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name) { if (Context::executable_name)
{
out << indent << FormatKV("executable", Context::executable_name) << ",\n"; out << indent << FormatKV("executable", Context::executable_name) << ",\n";
} }
CPUInfo const& info = context.cpu_info; CPUInfo const &info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus)) out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus)) << ",\n";
<< ",\n"; out << indent << FormatKV("mhz_per_cpu", RoundDouble(info.cycles_per_second / 1000000.0)) << ",\n";
out << indent out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) << ",\n";
<< FormatKV("mhz_per_cpu",
RoundDouble(info.cycles_per_second / 1000000.0))
<< ",\n";
out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
<< ",\n";
out << indent << "\"caches\": [\n"; out << indent << "\"caches\": [\n";
indent = std::string(6, ' '); indent = std::string(6, ' ');
std::string cache_indent(8, ' '); std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i) { for (size_t i = 0; i < info.caches.size(); ++i)
auto& CI = info.caches[i]; {
auto &CI = info.caches[i];
out << indent << "{\n"; out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n"; out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level)) out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level)) << ",\n";
<< ",\n"; out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent out << cache_indent << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing)) << "\n";
<< FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent
<< FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
<< "\n";
out << indent << "}"; out << indent << "}";
if (i != info.caches.size() - 1) out << ","; if (i != info.caches.size() - 1)
out << ",";
out << "\n"; out << "\n";
} }
indent = std::string(4, ' '); indent = std::string(4, ' ');
out << indent << "],\n"; out << indent << "],\n";
out << indent << "\"load_avg\": ["; out << indent << "\"load_avg\": [";
for (auto it = info.load_avg.begin(); it != info.load_avg.end();) { for (auto it = info.load_avg.begin(); it != info.load_avg.end();)
{
out << *it++; out << *it++;
if (it != info.load_avg.end()) out << ","; if (it != info.load_avg.end())
out << ",";
} }
out << "],\n"; out << "],\n";
@@ -164,40 +190,48 @@ bool JSONReporter::ReportContext(const Context& context) {
return true; return true;
} }
void JSONReporter::ReportRuns(std::vector<Run> const& reports) { void JSONReporter::ReportRuns(std::vector<Run> const &reports)
if (reports.empty()) { {
if (reports.empty())
{
return; return;
} }
std::string indent(4, ' '); std::string indent(4, ' ');
std::ostream& out = GetOutputStream(); std::ostream &out = GetOutputStream();
if (!first_report_) { if (!first_report_)
{
out << ",\n"; out << ",\n";
} }
first_report_ = false; first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) { for (auto it = reports.begin(); it != reports.end(); ++it)
{
out << indent << "{\n"; out << indent << "{\n";
PrintRunData(*it); PrintRunData(*it);
out << indent << '}'; out << indent << '}';
auto it_cp = it; auto it_cp = it;
if (++it_cp != reports.end()) { if (++it_cp != reports.end())
{
out << ",\n"; out << ",\n";
} }
} }
} }
void JSONReporter::Finalize() { void JSONReporter::Finalize()
{
// Close the list of benchmarks and the top level object. // Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n"; GetOutputStream() << "\n ]\n}\n";
} }
void JSONReporter::PrintRunData(Run const& run) { void JSONReporter::PrintRunData(Run const &run)
{
std::string indent(6, ' '); std::string indent(6, ' ');
std::ostream& out = GetOutputStream(); std::ostream &out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n"; out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n"; out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char* { out << indent << FormatKV("run_type", [&run]() -> const char * {
switch (run.run_type) { switch (run.run_type)
{
case BenchmarkReporter::Run::RT_Iteration: case BenchmarkReporter::Run::RT_Iteration:
return "iteration"; return "iteration";
case BenchmarkReporter::Run::RT_Aggregate: case BenchmarkReporter::Run::RT_Aggregate:
@@ -206,45 +240,52 @@ void JSONReporter::PrintRunData(Run const& run) {
BENCHMARK_UNREACHABLE(); BENCHMARK_UNREACHABLE();
}()) << ",\n"; }()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n"; out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) { if (run.run_type != BenchmarkReporter::Run::RT_Aggregate)
out << indent << FormatKV("repetition_index", run.repetition_index) {
<< ",\n"; out << indent << FormatKV("repetition_index", run.repetition_index) << ",\n";
} }
out << indent << FormatKV("threads", run.threads) << ",\n"; out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) { if (run.run_type == BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n"; out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
} }
if (run.error_occurred) { if (run.error_occurred)
{
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n"; out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n"; out << indent << FormatKV("error_message", run.error_message) << ",\n";
} }
if (!run.report_big_o && !run.report_rms) { if (!run.report_big_o && !run.report_rms)
{
out << indent << FormatKV("iterations", run.iterations) << ",\n"; out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n"; out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n" out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); }
} else if (run.report_big_o) { else if (run.report_big_o)
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) {
<< ",\n"; out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n";
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) { }
else if (run.report_rms)
{
out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
} }
for (auto& c : run.counters) { for (auto &c : run.counters)
{
out << ",\n" << indent << FormatKV(c.first, c.second); out << ",\n" << indent << FormatKV(c.first, c.second);
} }
if (run.has_memory_result) { if (run.has_memory_result)
{
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used); out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
} }
if (!run.report_label.empty()) { if (!run.report_label.empty())
{
out << ",\n" << indent << FormatKV("label", run.report_label); out << ",\n" << indent << FormatKV("label", run.report_label);
} }
out << '\n'; out << '\n';
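For context on how JSONReporter is normally reached: at runtime the flags --benchmark_format=json (console stream) and --benchmark_out=<file> --benchmark_out_format=json select it; it can also be passed explicitly, as in this hedged sketch (the benchmark itself is a placeholder):

#include <benchmark/benchmark.h>

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Noop);

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  // Prints the "context"/"benchmarks" JSON document assembled by the code above.
  benchmark::JSONReporter json_reporter;
  benchmark::RunSpecifiedBenchmarks(&json_reporter);
  return 0;
}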
@@ -6,58 +6,70 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&); typedef std::basic_ostream<char> &(EndLType)(std::basic_ostream<char> &);
class LogType { class LogType
friend LogType& GetNullLogInstance(); {
friend LogType& GetErrorLogInstance(); friend LogType &GetNullLogInstance();
friend LogType &GetErrorLogInstance();
// FIXME: Add locking to output. // FIXME: Add locking to output.
template <class Tp> template <class Tp> friend LogType &operator<<(LogType &, Tp const &);
friend LogType& operator<<(LogType&, Tp const&); friend LogType &operator<<(LogType &, EndLType *);
friend LogType& operator<<(LogType&, EndLType*);
private: private:
LogType(std::ostream* out) : out_(out) {} LogType(std::ostream *out) : out_(out)
std::ostream* out_; {
}
std::ostream *out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
}; };
template <class Tp> template <class Tp> LogType &operator<<(LogType &log, Tp const &value)
LogType& operator<<(LogType& log, Tp const& value) { {
if (log.out_) { if (log.out_)
{
*log.out_ << value; *log.out_ << value;
} }
return log; return log;
} }
inline LogType& operator<<(LogType& log, EndLType* m) { inline LogType &operator<<(LogType &log, EndLType *m)
if (log.out_) { {
if (log.out_)
{
*log.out_ << m; *log.out_ << m;
} }
return log; return log;
} }
inline int& LogLevel() { inline int &LogLevel()
{
static int log_level = 0; static int log_level = 0;
return log_level; return log_level;
} }
inline LogType& GetNullLogInstance() { inline LogType &GetNullLogInstance()
{
static LogType log(nullptr); static LogType log(nullptr);
return log; return log;
} }
inline LogType& GetErrorLogInstance() { inline LogType &GetErrorLogInstance()
{
static LogType log(&std::clog); static LogType log(&std::clog);
return log; return log;
} }
inline LogType& GetLogInstanceForLevel(int level) { inline LogType &GetLogInstanceForLevel(int level)
if (level <= LogLevel()) { {
if (level <= LogLevel())
{
return GetErrorLogInstance(); return GetErrorLogInstance();
} }
return GetNullLogInstance(); return GetNullLogInstance();
@@ -22,49 +22,38 @@
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) #define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \ #define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \ #define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \ #define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \ #define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \ #define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \ #define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \ #define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \ #define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \ #define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \ #define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) #define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) #define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \ #define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) #define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \ #define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark { namespace benchmark
{
typedef std::condition_variable Condition; typedef std::condition_variable Condition;
@@ -72,49 +61,76 @@ typedef std::condition_variable Condition;
// we can annotate them with thread safety attributes and use the // we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be // -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations. // used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex { class CAPABILITY("mutex") Mutex
{
public: public:
Mutex() {} Mutex()
{
}
void lock() ACQUIRE() { mut_.lock(); } void lock() ACQUIRE()
void unlock() RELEASE() { mut_.unlock(); } {
std::mutex& native_handle() { return mut_; } mut_.lock();
}
void unlock() RELEASE()
{
mut_.unlock();
}
std::mutex &native_handle()
{
return mut_;
}
private: private:
std::mutex mut_; std::mutex mut_;
}; };
class SCOPED_CAPABILITY MutexLock { class SCOPED_CAPABILITY MutexLock
{
typedef std::unique_lock<std::mutex> MutexLockImp; typedef std::unique_lock<std::mutex> MutexLockImp;
public: public:
MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} MutexLock(Mutex &m) ACQUIRE(m) : ml_(m.native_handle())
~MutexLock() RELEASE() {} {
MutexLockImp& native_handle() { return ml_; } }
~MutexLock() RELEASE()
{
}
MutexLockImp &native_handle()
{
return ml_;
}
private: private:
MutexLockImp ml_; MutexLockImp ml_;
}; };
class Barrier { class Barrier
{
public: public:
Barrier(int num_threads) : running_threads_(num_threads) {} Barrier(int num_threads) : running_threads_(num_threads)
{
}
// Called by each thread // Called by each thread
bool wait() EXCLUDES(lock_) { bool wait() EXCLUDES(lock_)
{
bool last_thread = false; bool last_thread = false;
{ {
MutexLock ml(lock_); MutexLock ml(lock_);
last_thread = createBarrier(ml); last_thread = createBarrier(ml);
} }
if (last_thread) phase_condition_.notify_all(); if (last_thread)
phase_condition_.notify_all();
return last_thread; return last_thread;
} }
void removeThread() EXCLUDES(lock_) { void removeThread() EXCLUDES(lock_)
{
MutexLock ml(lock_); MutexLock ml(lock_);
--running_threads_; --running_threads_;
if (entered_ != 0) phase_condition_.notify_all(); if (entered_ != 0)
phase_condition_.notify_all();
} }
private: private:
@@ -129,10 +145,12 @@ class Barrier {
// Enter the barrier and wait until all other threads have also // Enter the barrier and wait until all other threads have also
// entered the barrier. Returns true iff this is the last thread to // entered the barrier. Returns true iff this is the last thread to
// enter the barrier. // enter the barrier.
bool createBarrier(MutexLock& ml) REQUIRES(lock_) { bool createBarrier(MutexLock &ml) REQUIRES(lock_)
{
CHECK_LT(entered_, running_threads_); CHECK_LT(entered_, running_threads_);
entered_++; entered_++;
if (entered_ < running_threads_) { if (entered_ < running_threads_)
{
// Wait for all threads to enter // Wait for all threads to enter
int phase_number_cp = phase_number_; int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() { auto cb = [this, phase_number_cp]() {
@@ -140,7 +158,8 @@ class Barrier {
entered_ == running_threads_; // A thread has aborted in error entered_ == running_threads_; // A thread has aborted in error
}; };
phase_condition_.wait(ml.native_handle(), cb); phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) return false; if (phase_number_ > phase_number_cp)
return false;
// else (running_threads_ == entered_) and we are the last thread. // else (running_threads_ == entered_) and we are the last thread.
} }
// Last thread has reached the barrier // Last thread has reached the barrier
@@ -54,13 +54,17 @@
#include "check.h" #include "check.h"
namespace benchmark { namespace benchmark
{
// A wrapper around the POSIX regular expression API that provides automatic // A wrapper around the POSIX regular expression API that provides automatic
// cleanup // cleanup
class Regex { class Regex
{
public: public:
Regex() : init_(false) {} Regex() : init_(false)
{
}
~Regex(); ~Regex();
@@ -68,10 +72,10 @@ class Regex {
// //
// On failure (and if error is not nullptr), error is populated with a human // On failure (and if error is not nullptr), error is populated with a human
// readable error message if an error occurs. // readable error message if an error occurs.
bool Init(const std::string& spec, std::string* error); bool Init(const std::string &spec, std::string *error);
// Returns whether str matches the compiled regular expression. // Returns whether str matches the compiled regular expression.
bool Match(const std::string& str); bool Match(const std::string &str);
private: private:
bool init_; bool init_;
@@ -87,18 +91,22 @@ class Regex {
#if defined(HAVE_STD_REGEX) #if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string& spec, std::string* error) { inline bool Regex::Init(const std::string &spec, std::string *error)
{
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS #ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning ((void)error); // suppress unused warning
#else #else
try { try
{
#endif #endif
re_ = std::regex(spec, std::regex_constants::extended); re_ = std::regex(spec, std::regex_constants::extended);
init_ = true; init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS #ifndef BENCHMARK_HAS_NO_EXCEPTIONS
} }
catch (const std::regex_error& e) { catch (const std::regex_error &e)
if (error) { {
if (error)
{
*error = e.what(); *error = e.what();
} }
} }
@@ -106,22 +114,29 @@ catch (const std::regex_error& e) {
return init_; return init_;
} }
inline Regex::~Regex() {} inline Regex::~Regex()
{
}
inline bool Regex::Match(const std::string& str) { inline bool Regex::Match(const std::string &str)
if (!init_) { {
if (!init_)
{
return false; return false;
} }
return std::regex_search(str, re_); return std::regex_search(str, re_);
} }
#else #else
inline bool Regex::Init(const std::string& spec, std::string* error) { inline bool Regex::Init(const std::string &spec, std::string *error)
{
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0) { if (ec != 0)
if (error) { {
if (error)
{
size_t needed = regerror(ec, &re_, nullptr, 0); size_t needed = regerror(ec, &re_, nullptr, 0);
char* errbuf = new char[needed]; char *errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed); regerror(ec, &re_, errbuf, needed);
// regerror returns the number of bytes necessary to null terminate // regerror returns the number of bytes necessary to null terminate
@@ -139,14 +154,18 @@ inline bool Regex::Init(const std::string& spec, std::string* error) {
return true; return true;
} }
inline Regex::~Regex() { inline Regex::~Regex()
if (init_) { {
if (init_)
{
regfree(&re_); regfree(&re_);
} }
} }
inline bool Regex::Match(const std::string& str) { inline bool Regex::Match(const std::string &str)
if (!init_) { {
if (!init_)
{
return false; return false;
} }
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
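The Regex wrapper above primarily backs the --benchmark_filter flag (POSIX extended syntax). A hedged sketch of direct use follows; the backend define and the include path are assumptions about how this internal header is vendored, and the pattern is illustrative:

#define HAVE_STD_REGEX 1  // assumption: use the std::regex branch shown above
#include <iostream>
#include <string>
#include "re.h"  // internal header from this diff; include path is an assumption

int main() {
  benchmark::Regex re;
  std::string error;
  if (!re.Init("BM_[A-Za-z]+/128", &error)) {  // same syntax --benchmark_filter accepts
    std::cerr << "bad pattern: " << error << "\n";
    return 1;
  }
  std::cout << re.Match("BM_SortVector/128") << "\n";  // prints 1
  return 0;
}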
@@ -24,15 +24,19 @@
#include "check.h" #include "check.h"
#include "string_util.h" #include "string_util.h"
namespace benchmark { namespace benchmark
{
BenchmarkReporter::BenchmarkReporter() BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr)
: output_stream_(&std::cout), error_stream_(&std::cerr) {} {
}
BenchmarkReporter::~BenchmarkReporter() {} BenchmarkReporter::~BenchmarkReporter()
{
}
void BenchmarkReporter::PrintBasicContext(std::ostream *out, void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context)
Context const &context) { {
CHECK(out) << "cannot be null"; CHECK(out) << "cannot be null";
auto &Out = *out; auto &Out = *out;
@@ -42,29 +46,33 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "Running " << context.executable_name << "\n"; Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info; const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X " Out << "Run on (" << info.num_cpus << " X " << (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n"; << ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0) { if (info.caches.size() != 0)
{
Out << "CPU Caches:\n"; Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) { for (auto &CInfo : info.caches)
Out << " L" << CInfo.level << " " << CInfo.type << " " {
<< (CInfo.size / 1024) << " KiB"; Out << " L" << CInfo.level << " " << CInfo.type << " " << (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0) if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n"; Out << "\n";
} }
} }
if (!info.load_avg.empty()) { if (!info.load_avg.empty())
{
Out << "Load Average: "; Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();) { for (auto It = info.load_avg.begin(); It != info.load_avg.end();)
{
Out << StrFormat("%.2f", *It++); Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end()) Out << ", "; if (It != info.load_avg.end())
Out << ", ";
} }
Out << "\n"; Out << "\n";
} }
if (info.scaling_enabled) { if (info.scaling_enabled)
{
Out << "***WARNING*** CPU scaling is enabled, the benchmark " Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra " "real time measurements may be noisy and will incur extra "
"overhead.\n"; "overhead.\n";
@@ -79,26 +87,33 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
// No initializer because it's already initialized to NULL. // No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name; const char *BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context() BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get())
: cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {} {
}
std::string BenchmarkReporter::Run::benchmark_name() const { std::string BenchmarkReporter::Run::benchmark_name() const
{
std::string name = run_name.str(); std::string name = run_name.str();
if (run_type == RT_Aggregate) { if (run_type == RT_Aggregate)
{
name += "_" + aggregate_name; name += "_" + aggregate_name;
} }
return name; return name;
} }
double BenchmarkReporter::Run::GetAdjustedRealTime() const { double BenchmarkReporter::Run::GetAdjustedRealTime() const
{
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations); if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time; return new_time;
} }
double BenchmarkReporter::Run::GetAdjustedCPUTime() const { double BenchmarkReporter::Run::GetAdjustedCPUTime() const
{
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit); double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations); if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time; return new_time;
} }
@@ -24,15 +24,21 @@
#include <windows.h> #include <windows.h>
#endif #endif
namespace benchmark { namespace benchmark
{
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
// Windows' Sleep takes a milliseconds argument. // Windows' Sleep takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } void SleepForMilliseconds(int milliseconds)
void SleepForSeconds(double seconds) { {
Sleep(milliseconds);
}
void SleepForSeconds(double seconds)
{
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds)); SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
} }
#else // BENCHMARK_OS_WINDOWS #else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) { void SleepForMicroseconds(int microseconds)
{
struct timespec sleep_time; struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
@@ -40,11 +46,13 @@ void SleepForMicroseconds(int microseconds) {
; // Ignore signals and wait for the full interval to elapse. ; // Ignore signals and wait for the full interval to elapse.
} }
void SleepForMilliseconds(int milliseconds) { void SleepForMilliseconds(int milliseconds)
{
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
} }
void SleepForSeconds(double seconds) { void SleepForSeconds(double seconds)
{
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond)); SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
} }
#endif // BENCHMARK_OS_WINDOWS #endif // BENCHMARK_OS_WINDOWS
@@ -1,7 +1,8 @@
#ifndef BENCHMARK_SLEEP_H_ #ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_ #define BENCHMARK_SLEEP_H_
namespace benchmark { namespace benchmark
{
const int kNumMillisPerSecond = 1000; const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000; const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
@@ -15,27 +15,30 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "check.h"
#include "statistics.h"
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
#include <numeric> #include <numeric>
#include <string> #include <string>
#include <vector> #include <vector>
#include "check.h"
#include "statistics.h"
namespace benchmark { namespace benchmark
{
auto StatisticsSum = [](const std::vector<double>& v) { auto StatisticsSum = [](const std::vector<double> &v) { return std::accumulate(v.begin(), v.end(), 0.0); };
return std::accumulate(v.begin(), v.end(), 0.0);
};
double StatisticsMean(const std::vector<double>& v) { double StatisticsMean(const std::vector<double> &v)
if (v.empty()) return 0.0; {
if (v.empty())
return 0.0;
return StatisticsSum(v) * (1.0 / v.size()); return StatisticsSum(v) * (1.0 / v.size());
} }
double StatisticsMedian(const std::vector<double>& v) { double StatisticsMedian(const std::vector<double> &v)
if (v.size() < 3) return StatisticsMean(v); {
if (v.size() < 3)
return StatisticsMean(v);
std::vector<double> copy(v); std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2; auto center = copy.begin() + v.size() / 2;
@@ -45,45 +48,47 @@ double StatisticsMedian(const std::vector<double>& v) {
// if yes, then center is the median // if yes, then center is the median
// if no, then we are looking for the average between center and the value // if no, then we are looking for the average between center and the value
// before // before
if (v.size() % 2 == 1) return *center; if (v.size() % 2 == 1)
return *center;
auto center2 = copy.begin() + v.size() / 2 - 1; auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end()); std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0; return (*center + *center2) / 2.0;
} }
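
For an odd-length sample the single nth_element call above already places the true median at the centre iterator; the even-length branch then averages it with the neighbouring order statistic. A hypothetical standalone check of the odd case:

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<double> v = {9.0, 1.0, 7.0, 3.0, 5.0}; // odd length, median is 5
    auto center = v.begin() + v.size() / 2;            // index 2
    // nth_element partially sorts: the median lands at `center`,
    // smaller values before it, larger values after it.
    std::nth_element(v.begin(), center, v.end());
    assert(*center == 5.0);
    return 0;
}
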
// Return the sum of the squares of this sample set // Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double>& v) { auto SumSquares = [](const std::vector<double> &v) { return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); };
return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
};
auto Sqr = [](const double dat) { return dat * dat; }; auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) { auto Sqrt = [](const double dat) {
// Avoid NaN due to imprecision in the calculations // Avoid NaN due to imprecision in the calculations
if (dat < 0.0) return 0.0; if (dat < 0.0)
return 0.0;
return std::sqrt(dat); return std::sqrt(dat);
}; };
double StatisticsStdDev(const std::vector<double>& v) { double StatisticsStdDev(const std::vector<double> &v)
{
const auto mean = StatisticsMean(v); const auto mean = StatisticsMean(v);
if (v.empty()) return mean; if (v.empty())
return mean;
// Sample standard deviation is undefined for n = 1 // Sample standard deviation is undefined for n = 1
if (v.size() == 1) return 0.0; if (v.size() == 1)
return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size()); const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean))); return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
} }
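
The return expression above is the usual rearrangement of the sample standard deviation, sqrt(n/(n-1) * (avg_squares - mean^2)) with avg_squares = (1/n) * sum(x_i^2). A quick hand check with the three samples {1, 2, 3}, whose sample standard deviation is exactly 1:

#include <cassert>
#include <cmath>

int main()
{
    // Samples {1, 2, 3}: mean = 2, avg_squares = (1 + 4 + 9) / 3 = 14/3.
    const double mean = 2.0;
    const double avg_squares = 14.0 / 3.0;
    const double n = 3.0;
    // n/(n-1) * (avg_squares - mean^2) = 1.5 * (14/3 - 4) = 1, so stddev = 1.
    const double stddev = std::sqrt(n / (n - 1.0) * (avg_squares - mean * mean));
    assert(std::fabs(stddev - 1.0) < 1e-12);
    return 0;
}
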
std::vector<BenchmarkReporter::Run> ComputeStats( std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports)
const std::vector<BenchmarkReporter::Run>& reports) { {
typedef BenchmarkReporter::Run Run; typedef BenchmarkReporter::Run Run;
std::vector<Run> results; std::vector<Run> results;
auto error_count = auto error_count = std::count_if(reports.begin(), reports.end(), [](Run const &run) { return run.error_occurred; });
std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.error_occurred; });
if (reports.size() - error_count < 2) { if (reports.size() - error_count < 2)
{
// We don't report aggregated data if there was a single run. // We don't report aggregated data if there was a single run.
return results; return results;
} }
@ -99,33 +104,42 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// can take this information from the first benchmark. // can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations; const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters // create stats for user counters
struct CounterStat { struct CounterStat
{
Counter c; Counter c;
std::vector<double> s; std::vector<double> s;
}; };
std::map<std::string, CounterStat> counter_stats; std::map<std::string, CounterStat> counter_stats;
for (Run const& r : reports) { for (Run const &r : reports)
for (auto const& cnt : r.counters) { {
for (auto const &cnt : r.counters)
{
auto it = counter_stats.find(cnt.first); auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end()) { if (it == counter_stats.end())
{
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}}); counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first); it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size()); it->second.s.reserve(reports.size());
} else { }
else
{
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
} }
} }
} }
// Populate the accumulators. // Populate the accumulators.
for (Run const& run : reports) { for (Run const &run : reports)
{
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations); CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue; if (run.error_occurred)
continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time); real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters // user counters
for (auto const& cnt : run.counters) { for (auto const &cnt : run.counters)
{
auto it = counter_stats.find(cnt.first); auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end()); CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second); it->second.s.emplace_back(cnt.second);
@ -134,17 +148,19 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Only add the label if it is the same for all runs // Only add the label if it is the same for all runs
std::string report_label = reports[0].report_label; std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) { for (std::size_t i = 1; i < reports.size(); i++)
if (reports[i].report_label != report_label) { {
if (reports[i].report_label != report_label)
{
report_label = ""; report_label = "";
break; break;
} }
} }
const double iteration_rescale_factor = const double iteration_rescale_factor = double(reports.size()) / double(run_iterations);
double(reports.size()) / double(run_iterations);
for (const auto& Stat : *reports[0].statistics) { for (const auto &Stat : *reports[0].statistics)
{
// Get the data from the accumulator to BenchmarkReporter::Run's. // Get the data from the accumulator to BenchmarkReporter::Run's.
Run data; Run data;
data.run_name = reports[0].run_name; data.run_name = reports[0].run_name;
@ -176,11 +192,11 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
data.time_unit = reports[0].time_unit; data.time_unit = reports[0].time_unit;
// user counters // user counters
for (auto const& kv : counter_stats) { for (auto const &kv : counter_stats)
{
// Do *NOT* rescale the custom counters. They are already properly scaled. // Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s); const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, counter_stats[kv.first].c.oneK);
counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c; data.counters[kv.first] = c;
} }
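
The loops above collect one sample vector per user-counter name across repetitions, verify that the flags agree, and then reduce each vector with the chosen statistic without any iteration rescaling. A reduced sketch of that accumulate-then-reduce shape; the container name and the mean reduction below are stand-ins, not the library's types:

#include <cassert>
#include <map>
#include <numeric>
#include <string>
#include <vector>

int main()
{
    // One sample vector per user-counter name, filled across repetitions.
    std::map<std::string, std::vector<double>> counter_samples;
    counter_samples["items"].push_back(100.0); // repetition 1
    counter_samples["items"].push_back(120.0); // repetition 2
    counter_samples["items"].push_back(110.0); // repetition 3

    // Reduce each sample set with the chosen statistic (mean here),
    // with no iteration rescaling, mirroring the comment above.
    for (auto const &kv : counter_samples)
    {
        const auto &s = kv.second;
        const double mean = std::accumulate(s.begin(), s.end(), 0.0) / s.size();
        assert(kv.first != "items" || mean == 110.0);
    }
    return 0;
}
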


@ -20,17 +20,17 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace benchmark { namespace benchmark
{
// Return a vector containing the mean, median and standard deviation information // (and any user-specified info) for the specified list of reports. If 'reports'
// (and any user-specified info) for the specified list of reports. If 'reports' // (and any user-specified info) for the specified list of reports. If 'reports'
// contains less than two non-errored runs an empty vector is returned // contains less than two non-errored runs an empty vector is returned
std::vector<BenchmarkReporter::Run> ComputeStats( std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports);
const std::vector<BenchmarkReporter::Run>& reports);
double StatisticsMean(const std::vector<double>& v); double StatisticsMean(const std::vector<double> &v);
double StatisticsMedian(const std::vector<double>& v); double StatisticsMedian(const std::vector<double> &v);
double StatisticsStdDev(const std::vector<double>& v); double StatisticsStdDev(const std::vector<double> &v);
} // end namespace benchmark } // end namespace benchmark


@ -12,8 +12,10 @@
#include "arraysize.h" #include "arraysize.h"
namespace benchmark { namespace benchmark
namespace { {
namespace
{
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta. // kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY"; const char kBigSIUnits[] = "kMGTPEZY";
@ -23,38 +25,40 @@ const char kBigIECUnits[] = "KMGTPEZY";
const char kSmallSIUnits[] = "munpfazy"; const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size. // We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), "SI and IEC unit arrays must be the same size");
"SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size"); "Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits); static const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, double thresh, int precision, void ToExponentAndMantissa(double val, double thresh, int precision, double one_k, std::string *mantissa,
double one_k, std::string* mantissa, int64_t *exponent)
int64_t* exponent) { {
std::stringstream mantissa_stream; std::stringstream mantissa_stream;
if (val < 0) { if (val < 0)
{
mantissa_stream << "-"; mantissa_stream << "-";
val = -val; val = -val;
} }
// Adjust threshold so that it never excludes things which can't be rendered // Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits. // in 'precision' digits.
const double adjusted_threshold = const double adjusted_threshold = std::max(thresh, 1.0 / std::pow(10.0, precision));
std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k; const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold; const double small_threshold = adjusted_threshold;
// Values in ]simple_threshold,small_threshold[ will be printed as-is // Values in ]simple_threshold,small_threshold[ will be printed as-is
const double simple_threshold = 0.01; const double simple_threshold = 0.01;
if (val > big_threshold) { if (val > big_threshold)
{
// Positive powers // Positive powers
double scaled = val; double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) { for (size_t i = 0; i < arraysize(kBigSIUnits); ++i)
{
scaled /= one_k; scaled /= one_k;
if (scaled <= big_threshold) { if (scaled <= big_threshold)
{
mantissa_stream << scaled; mantissa_stream << scaled;
*exponent = i + 1; *exponent = i + 1;
*mantissa = mantissa_stream.str(); *mantissa = mantissa_stream.str();
@ -63,13 +67,18 @@ void ToExponentAndMantissa(double val, double thresh, int precision,
} }
mantissa_stream << val; mantissa_stream << val;
*exponent = 0; *exponent = 0;
} else if (val < small_threshold) { }
else if (val < small_threshold)
{
// Negative powers // Negative powers
if (val < simple_threshold) { if (val < simple_threshold)
{
double scaled = val; double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i)
{
scaled *= one_k; scaled *= one_k;
if (scaled >= small_threshold) { if (scaled >= small_threshold)
{
mantissa_stream << scaled; mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1); *exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str(); *mantissa = mantissa_stream.str();
@ -79,53 +88,59 @@ void ToExponentAndMantissa(double val, double thresh, int precision,
} }
mantissa_stream << val; mantissa_stream << val;
*exponent = 0; *exponent = 0;
} else { }
else
{
mantissa_stream << val; mantissa_stream << val;
*exponent = 0; *exponent = 0;
} }
*mantissa = mantissa_stream.str(); *mantissa = mantissa_stream.str();
} }
std::string ExponentToPrefix(int64_t exponent, bool iec) { std::string ExponentToPrefix(int64_t exponent, bool iec)
if (exponent == 0) return ""; {
if (exponent == 0)
return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return ""; if (index >= kUnitsSize)
return "";
const char* array = const char *array = (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec) if (iec)
return array[index] + std::string("i"); return array[index] + std::string("i");
else else
return std::string(1, array[index]); return std::string(1, array[index]);
} }
std::string ToBinaryStringFullySpecified(double value, double threshold, std::string ToBinaryStringFullySpecified(double value, double threshold, int precision, double one_k = 1024.0)
int precision, double one_k = 1024.0) { {
std::string mantissa; std::string mantissa;
int64_t exponent; int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, &exponent);
&exponent);
return mantissa + ExponentToPrefix(exponent, false); return mantissa + ExponentToPrefix(exponent, false);
} }
} // end namespace } // end namespace
void AppendHumanReadable(int n, std::string* str) { void AppendHumanReadable(int n, std::string *str)
{
std::stringstream ss; std::stringstream ss;
// Round down to the nearest SI prefix. // Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0); ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str(); *str += ss.str();
} }
std::string HumanReadableNumber(double n, double one_k) { std::string HumanReadableNumber(double n, double one_k)
{
// 1.1 means that figures up to 1.1k should be shown with the next unit down; // 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects. // this softens edge effects.
// 1 means that we should show one decimal place of precision. // 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
} }
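
ToExponentAndMantissa repeatedly divides by one_k until the value drops under the (threshold-adjusted) limit, and the resulting exponent indexes into the SI/IEC prefix tables above. A simplified sketch of that idea which ignores the adjustable threshold and precision handling:

#include <cassert>
#include <cmath>

int main()
{
    // 2,500,000 with one_k = 1000: divide twice to get under one_k,
    // so exponent = 2 (the "M" prefix) and the mantissa is 2.5, i.e. "2.5M".
    double value = 2500000.0;
    const double one_k = 1000.0;
    int exponent = 0;
    while (value >= one_k)
    {
        value /= one_k;
        ++exponent;
    }
    assert(exponent == 2);
    assert(std::fabs(value - 2.5) < 1e-9);
    return 0;
}
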
std::string StrFormatImp(const char* msg, va_list args) { std::string StrFormatImp(const char *msg, va_list args)
{
// we might need a second shot at this, so pre-emptively make a copy // we might need a second shot at this, so pre-emptively make a copy
va_list args_cp; va_list args_cp;
va_copy(args_cp, args); va_copy(args_cp, args);
@ -141,7 +156,8 @@ std::string StrFormatImp(const char* msg, va_list args) {
va_end(args_cp); va_end(args_cp);
// handle empty expansion // handle empty expansion
if (ret == 0) return std::string{}; if (ret == 0)
return std::string{};
if (static_cast<std::size_t>(ret) < size) if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data()); return std::string(local_buff.data());
@ -155,7 +171,8 @@ std::string StrFormatImp(const char* msg, va_list args) {
return std::string(buff_ptr.get()); return std::string(buff_ptr.get());
} }
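
StrFormatImp formats into a small stack buffer first and only heap-allocates when vsnprintf reports that the expansion did not fit, which is why the va_list is copied before the first attempt. A hypothetical standalone version of the same two-pass pattern:

#include <array>
#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <memory>
#include <string>

std::string Format(const char *msg, ...)
{
    va_list args;
    va_start(args, msg);
    va_list args_cp;
    va_copy(args_cp, args); // vsnprintf consumes the list, so keep a copy for the second pass

    std::array<char, 64> local_buff; // first shot: small stack buffer
    const int ret = std::vsnprintf(local_buff.data(), local_buff.size(), msg, args_cp);
    va_end(args_cp);

    std::string result;
    if (ret < 0)
    {
        result = std::string{}; // encoding error
    }
    else if (static_cast<std::size_t>(ret) < local_buff.size())
    {
        result = std::string(local_buff.data());
    }
    else
    {
        // Second shot: allocate exactly ret + 1 bytes and format again.
        const std::size_t size = static_cast<std::size_t>(ret) + 1;
        std::unique_ptr<char[]> buff(new char[size]);
        std::vsnprintf(buff.get(), size, msg, args);
        result = std::string(buff.get());
    }
    va_end(args);
    return result;
}

int main()
{
    assert(Format("cpu %d at %.1f MHz", 0, 2400.0) == "cpu 0 at 2400.0 MHz");
    return 0;
}
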
std::string StrFormat(const char* format, ...) { std::string StrFormat(const char *format, ...)
{
va_list args; va_list args;
va_start(args, format); va_start(args, format);
std::string tmp = StrFormatImp(format, args); std::string tmp = StrFormatImp(format, args);
@ -170,13 +187,14 @@ std::string StrFormat(const char* format, ...) {
* strtol, strtod. Note that reimplemented functions are in benchmark:: * strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace. * namespace, not std:: namespace.
*/ */
unsigned long stoul(const std::string& str, size_t* pos, int base) { unsigned long stoul(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */ /* Record previous errno */
const int oldErrno = errno; const int oldErrno = errno;
errno = 0; errno = 0;
const char* strStart = str.c_str(); const char *strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart); char *strEnd = const_cast<char *>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base); const unsigned long result = strtoul(strStart, &strEnd, base);
const int strtoulErrno = errno; const int strtoulErrno = errno;
@ -184,26 +202,29 @@ unsigned long stoul(const std::string& str, size_t* pos, int base) {
errno = oldErrno; errno = oldErrno;
/* Check for errors and return */ /* Check for errors and return */
if (strtoulErrno == ERANGE) { if (strtoulErrno == ERANGE)
throw std::out_of_range( {
"stoul failed: " + str + " is outside of range of unsigned long"); throw std::out_of_range("stoul failed: " + str + " is outside of range of unsigned long");
} else if (strEnd == strStart || strtoulErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
} }
if (pos != nullptr) { else if (strEnd == strStart || strtoulErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart); *pos = static_cast<size_t>(strEnd - strStart);
} }
return result; return result;
} }
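
The wrapper above saves and restores the caller's errno around strtoul and converts range errors and no-digit inputs into exceptions, mirroring std::stoul for toolchains (such as QNX's at the time) that lack it. A hypothetical demonstration of the same errno-guarded parse:

#include <cassert>
#include <cerrno>
#include <cstdlib>
#include <string>

int main()
{
    const std::string str = "42abc";
    const int saved_errno = errno; // preserve the caller's errno
    errno = 0;
    const char *start = str.c_str();
    char *end = const_cast<char *>(start);
    const unsigned long value = strtoul(start, &end, 10);
    const bool range_error = (errno == ERANGE);
    const bool no_digits = (end == start);
    errno = saved_errno;           // restore before returning to the caller
    assert(!range_error && !no_digits);
    assert(value == 42);
    assert(static_cast<std::size_t>(end - start) == 2); // characters consumed
    return 0;
}
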
int stoi(const std::string& str, size_t* pos, int base) { int stoi(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */ /* Record previous errno */
const int oldErrno = errno; const int oldErrno = errno;
errno = 0; errno = 0;
const char* strStart = str.c_str(); const char *strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart); char *strEnd = const_cast<char *>(strStart);
const long result = strtol(strStart, &strEnd, base); const long result = strtol(strStart, &strEnd, base);
const int strtolErrno = errno; const int strtolErrno = errno;
@ -211,26 +232,29 @@ int stoi(const std::string& str, size_t* pos, int base) {
errno = oldErrno; errno = oldErrno;
/* Check for errors and return */ /* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result) { if (strtolErrno == ERANGE || long(int(result)) != result)
throw std::out_of_range( {
"stoul failed: " + str + " is outside of range of int"); throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtolErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
} }
if (pos != nullptr) { else if (strEnd == strStart || strtolErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart); *pos = static_cast<size_t>(strEnd - strStart);
} }
return int(result); return int(result);
} }
double stod(const std::string& str, size_t* pos) { double stod(const std::string &str, size_t *pos)
{
/* Record previous errno */ /* Record previous errno */
const int oldErrno = errno; const int oldErrno = errno;
errno = 0; errno = 0;
const char* strStart = str.c_str(); const char *strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart); char *strEnd = const_cast<char *>(strStart);
const double result = strtod(strStart, &strEnd); const double result = strtod(strStart, &strEnd);
/* Restore previous errno */ /* Restore previous errno */
@ -238,14 +262,16 @@ double stod(const std::string& str, size_t* pos) {
errno = oldErrno; errno = oldErrno;
/* Check for errors and return */ /* Check for errors and return */
if (strtodErrno == ERANGE) { if (strtodErrno == ERANGE)
throw std::out_of_range( {
"stoul failed: " + str + " is outside of range of int"); throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtodErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
} }
if (pos != nullptr) { else if (strEnd == strStart || strtodErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart); *pos = static_cast<size_t>(strEnd - strStart);
} }
return result; return result;


@ -1,14 +1,15 @@
#ifndef BENCHMARK_STRING_UTIL_H_ #ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_ #define BENCHMARK_STRING_UTIL_H_
#include "internal_macros.h"
#include <sstream> #include <sstream>
#include <string> #include <string>
#include <utility> #include <utility>
#include "internal_macros.h"
namespace benchmark { namespace benchmark
{
void AppendHumanReadable(int n, std::string* str); void AppendHumanReadable(int n, std::string *str);
std::string HumanReadableNumber(double n, double one_k = 1024.0); std::string HumanReadableNumber(double n, double one_k = 1024.0);
@ -18,20 +19,21 @@ __attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
__attribute__((format(printf, 1, 2))) __attribute__((format(printf, 1, 2)))
#endif #endif
std::string std::string
StrFormat(const char* format, ...); StrFormat(const char *format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { inline std::ostream &StrCatImp(std::ostream &out) BENCHMARK_NOEXCEPT
{
return out; return out;
} }
template <class First, class... Rest> template <class First, class... Rest> inline std::ostream &StrCatImp(std::ostream &out, First &&f, Rest &&...rest)
inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) { {
out << std::forward<First>(f); out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...); return StrCatImp(out, std::forward<Rest>(rest)...);
} }
template <class... Args> template <class... Args> inline std::string StrCat(Args &&...args)
inline std::string StrCat(Args&&... args) { {
std::ostringstream ss; std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...); StrCatImp(ss, std::forward<Args>(args)...);
return ss.str(); return ss.str();
@ -44,14 +46,13 @@ inline std::string StrCat(Args&&... args) {
* strtol, strtod. Note that reimplemented functions are in benchmark:: * strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace. * namespace, not std:: namespace.
*/ */
unsigned long stoul(const std::string& str, size_t* pos = nullptr, unsigned long stoul(const std::string &str, size_t *pos = nullptr, int base = 10);
int base = 10); int stoi(const std::string &str, size_t *pos = nullptr, int base = 10);
int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); double stod(const std::string &str, size_t *pos = nullptr);
double stod(const std::string& str, size_t* pos = nullptr);
#else #else
using std::stoul;
using std::stoi;
using std::stod; using std::stod;
using std::stoi;
using std::stoul;
#endif #endif
} // end namespace benchmark } // end namespace benchmark
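
StrCat above folds an arbitrary argument list into one ostringstream through the recursive StrCatImp overloads, then returns the accumulated string. A self-contained sketch of the same technique under local names (Cat/CatImp are not the library's identifiers):

#include <cassert>
#include <sstream>
#include <string>
#include <utility>

// Base case: nothing left to append.
inline std::ostream &CatImp(std::ostream &out)
{
    return out;
}

// Recursive case: stream the first argument, then recurse on the rest.
template <class First, class... Rest> inline std::ostream &CatImp(std::ostream &out, First &&f, Rest &&...rest)
{
    out << std::forward<First>(f);
    return CatImp(out, std::forward<Rest>(rest)...);
}

template <class... Args> inline std::string Cat(Args &&...args)
{
    std::ostringstream ss;
    CatImp(ss, std::forward<Args>(args)...);
    return ss.str();
}

int main()
{
    assert(Cat("cpu", 0, "/cpufreq") == "cpu0/cpufreq");
    return 0;
}
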


@ -17,9 +17,9 @@
#ifdef BENCHMARK_OS_WINDOWS #ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h> #include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA #undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <codecvt>
#include <versionhelpers.h> #include <versionhelpers.h>
#include <windows.h> #include <windows.h>
#include <codecvt>
#else #else
#include <fcntl.h> #include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA #ifndef BENCHMARK_OS_FUCHSIA
@ -28,8 +28,8 @@
#include <sys/time.h> #include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD #include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h> #include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \ #if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD defined BENCHMARK_OS_OPENBSD
#define BENCHMARK_HAS_SYSCTL #define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h> #include <sys/sysctl.h>
#endif #endif
@ -54,9 +54,9 @@
#include <iostream> #include <iostream>
#include <iterator> #include <iterator>
#include <limits> #include <limits>
#include <locale>
#include <memory> #include <memory>
#include <sstream> #include <sstream>
#include <locale>
#include "check.h" #include "check.h"
#include "cycleclock.h" #include "cycleclock.h"
@ -65,19 +65,24 @@
#include "sleep.h" #include "sleep.h"
#include "string_util.h" #include "string_util.h"
namespace benchmark { namespace benchmark
namespace { {
namespace
{
void PrintImp(std::ostream& out) { out << std::endl; } void PrintImp(std::ostream &out)
{
out << std::endl;
}
template <class First, class... Rest> template <class First, class... Rest> void PrintImp(std::ostream &out, First &&f, Rest &&...rest)
void PrintImp(std::ostream& out, First&& f, Rest&&... rest) { {
out << std::forward<First>(f); out << std::forward<First>(f);
PrintImp(out, std::forward<Rest>(rest)...); PrintImp(out, std::forward<Rest>(rest)...);
} }
template <class... Args> template <class... Args> BENCHMARK_NORETURN void PrintErrorAndDie(Args &&...args)
BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) { {
PrintImp(std::cerr, std::forward<Args>(args)...); PrintImp(std::cerr, std::forward<Args>(args)...);
std::exit(EXIT_FAILURE); std::exit(EXIT_FAILURE);
} }
@ -86,7 +91,8 @@ BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
/// ValueUnion - A type used to correctly alias the byte-for-byte output of /// ValueUnion - A type used to correctly alias the byte-for-byte output of
/// `sysctl` with the result type it's to be interpreted as. /// `sysctl` with the result type it's to be interpreted as.
struct ValueUnion { struct ValueUnion
{
union DataT { union DataT {
uint32_t uint32_value; uint32_t uint32_value;
uint64_t uint64_value; uint64_t uint64_value;
@ -100,21 +106,34 @@ struct ValueUnion {
DataPtr Buff; DataPtr Buff;
public: public:
ValueUnion() : Size(0), Buff(nullptr, &std::free) {} ValueUnion() : Size(0), Buff(nullptr, &std::free)
{
}
explicit ValueUnion(size_t BuffSize) explicit ValueUnion(size_t BuffSize)
: Size(sizeof(DataT) + BuffSize), : Size(sizeof(DataT) + BuffSize), Buff(::new (std::malloc(Size)) DataT(), &std::free)
Buff(::new (std::malloc(Size)) DataT(), &std::free) {} {
}
ValueUnion(ValueUnion&& other) = default; ValueUnion(ValueUnion &&other) = default;
explicit operator bool() const { return bool(Buff); } explicit operator bool() const
{
return bool(Buff);
}
char* data() const { return Buff->bytes; } char *data() const
{
return Buff->bytes;
}
std::string GetAsString() const { return std::string(data()); } std::string GetAsString() const
{
return std::string(data());
}
int64_t GetAsInteger() const { int64_t GetAsInteger() const
{
if (Size == sizeof(Buff->uint32_value)) if (Size == sizeof(Buff->uint32_value))
return static_cast<int32_t>(Buff->uint32_value); return static_cast<int32_t>(Buff->uint32_value);
else if (Size == sizeof(Buff->uint64_value)) else if (Size == sizeof(Buff->uint64_value))
@ -122,7 +141,8 @@ struct ValueUnion {
BENCHMARK_UNREACHABLE(); BENCHMARK_UNREACHABLE();
} }
uint64_t GetAsUnsigned() const { uint64_t GetAsUnsigned() const
{
if (Size == sizeof(Buff->uint32_value)) if (Size == sizeof(Buff->uint32_value))
return Buff->uint32_value; return Buff->uint32_value;
else if (Size == sizeof(Buff->uint64_value)) else if (Size == sizeof(Buff->uint64_value))
@ -130,8 +150,8 @@ struct ValueUnion {
BENCHMARK_UNREACHABLE(); BENCHMARK_UNREACHABLE();
} }
template <class T, int N> template <class T, int N> std::array<T, N> GetAsArray()
std::array<T, N> GetAsArray() { {
const int ArrSize = sizeof(T) * N; const int ArrSize = sizeof(T) * N;
CHECK_LE(ArrSize, Size); CHECK_LE(ArrSize, Size);
std::array<T, N> Arr; std::array<T, N> Arr;
@ -140,21 +160,27 @@ struct ValueUnion {
} }
}; };
ValueUnion GetSysctlImp(std::string const& Name) { ValueUnion GetSysctlImp(std::string const &Name)
{
#if defined BENCHMARK_OS_OPENBSD #if defined BENCHMARK_OS_OPENBSD
int mib[2]; int mib[2];
mib[0] = CTL_HW; mib[0] = CTL_HW;
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){ if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed"))
{
ValueUnion buff(sizeof(int)); ValueUnion buff(sizeof(int));
if (Name == "hw.ncpu") { if (Name == "hw.ncpu")
{
mib[1] = HW_NCPU; mib[1] = HW_NCPU;
} else { }
else
{
mib[1] = HW_CPUSPEED; mib[1] = HW_CPUSPEED;
} }
if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) { if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1)
{
return ValueUnion(); return ValueUnion();
} }
return buff; return buff;
@ -173,45 +199,52 @@ ValueUnion GetSysctlImp(std::string const& Name) {
} }
BENCHMARK_MAYBE_UNUSED BENCHMARK_MAYBE_UNUSED
bool GetSysctl(std::string const& Name, std::string* Out) { bool GetSysctl(std::string const &Name, std::string *Out)
{
Out->clear(); Out->clear();
auto Buff = GetSysctlImp(Name); auto Buff = GetSysctlImp(Name);
if (!Buff) return false; if (!Buff)
return false;
Out->assign(Buff.data()); Out->assign(Buff.data());
return true; return true;
} }
template <class Tp, template <class Tp, class = typename std::enable_if<std::is_integral<Tp>::value>::type>
class = typename std::enable_if<std::is_integral<Tp>::value>::type> bool GetSysctl(std::string const &Name, Tp *Out)
bool GetSysctl(std::string const& Name, Tp* Out) { {
*Out = 0; *Out = 0;
auto Buff = GetSysctlImp(Name); auto Buff = GetSysctlImp(Name);
if (!Buff) return false; if (!Buff)
return false;
*Out = static_cast<Tp>(Buff.GetAsUnsigned()); *Out = static_cast<Tp>(Buff.GetAsUnsigned());
return true; return true;
} }
template <class Tp, size_t N> template <class Tp, size_t N> bool GetSysctl(std::string const &Name, std::array<Tp, N> *Out)
bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) { {
auto Buff = GetSysctlImp(Name); auto Buff = GetSysctlImp(Name);
if (!Buff) return false; if (!Buff)
return false;
*Out = Buff.GetAsArray<Tp, N>(); *Out = Buff.GetAsArray<Tp, N>();
return true; return true;
} }
#endif #endif
template <class ArgT> template <class ArgT> bool ReadFromFile(std::string const &fname, ArgT *arg)
bool ReadFromFile(std::string const& fname, ArgT* arg) { {
*arg = ArgT(); *arg = ArgT();
std::ifstream f(fname.c_str()); std::ifstream f(fname.c_str());
if (!f.is_open()) return false; if (!f.is_open())
return false;
f >> *arg; f >> *arg;
return f.good(); return f.good();
} }
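
ReadFromFile simply stream-extracts the first token of a file into whatever type the caller requests, which is how the sysfs frequency and governor files are consumed further down. A hypothetical standalone use against a throwaway file:

#include <cassert>
#include <cstdio>
#include <fstream>
#include <string>

template <class ArgT> bool ReadValue(std::string const &fname, ArgT *arg)
{
    *arg = ArgT();
    std::ifstream f(fname.c_str());
    if (!f.is_open())
        return false;
    f >> *arg; // stream-extract into the requested type
    return f.good();
}

int main()
{
    const std::string path = "freq_khz.txt"; // hypothetical test file
    {
        std::ofstream out(path.c_str());
        out << "2000000\n";
    }
    long freq = 0;
    assert(ReadValue(path, &freq));
    assert(freq == 2000000); // a kHz value, as in cpuinfo_max_freq
    std::remove(path.c_str());
    return 0;
}
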
bool CpuScalingEnabled(int num_cpus) { bool CpuScalingEnabled(int num_cpus)
{
// We don't have a valid CPU count, so don't even bother. // We don't have a valid CPU count, so don't even bother.
if (num_cpus <= 0) return false; if (num_cpus <= 0)
return false;
#ifdef BENCHMARK_OS_QNX #ifdef BENCHMARK_OS_QNX
return false; return false;
#endif #endif
@ -220,16 +253,18 @@ bool CpuScalingEnabled(int num_cpus) {
// local file system. If reading the exported files fails, then we may not be // local file system. If reading the exported files fails, then we may not be
// running on Linux, so we silently ignore all the read errors. // running on Linux, so we silently ignore all the read errors.
std::string res; std::string res;
for (int cpu = 0; cpu < num_cpus; ++cpu) { for (int cpu = 0; cpu < num_cpus; ++cpu)
std::string governor_file = {
StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor"); std::string governor_file = StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
if (ReadFromFile(governor_file, &res) && res != "performance") return true; if (ReadFromFile(governor_file, &res) && res != "performance")
return true;
} }
#endif #endif
return false; return false;
} }
int CountSetBitsInCPUMap(std::string Val) { int CountSetBitsInCPUMap(std::string Val)
{
auto CountBits = [](std::string Part) { auto CountBits = [](std::string Part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>; using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
Part = "0x" + Part; Part = "0x" + Part;
@ -238,35 +273,40 @@ int CountSetBitsInCPUMap(std::string Val) {
}; };
size_t Pos; size_t Pos;
int total = 0; int total = 0;
while ((Pos = Val.find(',')) != std::string::npos) { while ((Pos = Val.find(',')) != std::string::npos)
{
total += CountBits(Val.substr(0, Pos)); total += CountBits(Val.substr(0, Pos));
Val = Val.substr(Pos + 1); Val = Val.substr(Pos + 1);
} }
if (!Val.empty()) { if (!Val.empty())
{
total += CountBits(Val); total += CountBits(Val);
} }
return total; return total;
} }
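
CountSetBitsInCPUMap splits a kernel CPU-mask string such as "ff,00ff00ff" on commas and adds up the popcount of each hexadecimal chunk via a bitset. A hypothetical standalone check of the same parsing:

#include <bitset>
#include <cassert>
#include <climits>
#include <cstdint>
#include <string>

int CountSetBits(std::string val)
{
    auto CountBits = [](std::string part) {
        using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
        part = "0x" + part; // base 16 accepts the 0x prefix
        CPUMask mask(std::stoul(part, nullptr, 16));
        return static_cast<int>(mask.count());
    };
    std::size_t pos;
    int total = 0;
    while ((pos = val.find(',')) != std::string::npos)
    {
        total += CountBits(val.substr(0, pos)); // one chunk per comma
        val = val.substr(pos + 1);
    }
    if (!val.empty())
        total += CountBits(val);                // trailing chunk
    return total;
}

int main()
{
    assert(CountSetBits("ff") == 8);
    assert(CountSetBits("f,0f") == 8); // 4 bits + 4 bits
    return 0;
}
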
BENCHMARK_MAYBE_UNUSED BENCHMARK_MAYBE_UNUSED
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() { std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS()
{
std::vector<CPUInfo::CacheInfo> res; std::vector<CPUInfo::CacheInfo> res;
std::string dir = "/sys/devices/system/cpu/cpu0/cache/"; std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
int Idx = 0; int Idx = 0;
while (true) { while (true)
{
CPUInfo::CacheInfo info; CPUInfo::CacheInfo info;
std::string FPath = StrCat(dir, "index", Idx++, "/"); std::string FPath = StrCat(dir, "index", Idx++, "/");
std::ifstream f(StrCat(FPath, "size").c_str()); std::ifstream f(StrCat(FPath, "size").c_str());
if (!f.is_open()) break; if (!f.is_open())
break;
std::string suffix; std::string suffix;
f >> info.size; f >> info.size;
if (f.fail()) if (f.fail())
PrintErrorAndDie("Failed while reading file '", FPath, "size'"); PrintErrorAndDie("Failed while reading file '", FPath, "size'");
if (f.good()) { if (f.good())
{
f >> suffix; f >> suffix;
if (f.bad()) if (f.bad())
PrintErrorAndDie( PrintErrorAndDie("Invalid cache size format: failed to read size suffix");
"Invalid cache size format: failed to read size suffix");
else if (f && suffix != "K") else if (f && suffix != "K")
PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix); PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
else if (suffix == "K") else if (suffix == "K")
@ -287,12 +327,14 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
} }
#ifdef BENCHMARK_OS_MACOSX #ifdef BENCHMARK_OS_MACOSX
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() { std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX()
{
std::vector<CPUInfo::CacheInfo> res; std::vector<CPUInfo::CacheInfo> res;
std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}}; std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
GetSysctl("hw.cacheconfig", &CacheCounts); GetSysctl("hw.cacheconfig", &CacheCounts);
struct { struct
{
std::string name; std::string name;
std::string type; std::string type;
int level; int level;
@ -301,9 +343,11 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]}, {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]}, {"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
{"hw.l3cachesize", "Unified", 3, CacheCounts[3]}}; {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
for (auto& C : Cases) { for (auto &C : Cases)
{
int val; int val;
if (!GetSysctl(C.name, &val)) continue; if (!GetSysctl(C.name, &val))
continue;
CPUInfo::CacheInfo info; CPUInfo::CacheInfo info;
info.type = C.type; info.type = C.type;
info.level = C.level; info.level = C.level;
@ -314,7 +358,8 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
return res; return res;
} }
#elif defined(BENCHMARK_OS_WINDOWS) #elif defined(BENCHMARK_OS_WINDOWS)
std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() { std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows()
{
std::vector<CPUInfo::CacheInfo> res; std::vector<CPUInfo::CacheInfo> res;
DWORD buffer_size = 0; DWORD buffer_size = 0;
using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
@ -322,26 +367,29 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>; using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
GetLogicalProcessorInformation(nullptr, &buffer_size); GetLogicalProcessorInformation(nullptr, &buffer_size);
UPtr buff((PInfo*)malloc(buffer_size), &std::free); UPtr buff((PInfo *)malloc(buffer_size), &std::free);
if (!GetLogicalProcessorInformation(buff.get(), &buffer_size)) if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", GetLastError());
GetLastError());
PInfo* it = buff.get(); PInfo *it = buff.get();
PInfo* end = buff.get() + (buffer_size / sizeof(PInfo)); PInfo *end = buff.get() + (buffer_size / sizeof(PInfo));
for (; it != end; ++it) { for (; it != end; ++it)
if (it->Relationship != RelationCache) continue; {
if (it->Relationship != RelationCache)
continue;
using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>; using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
BitSet B(it->ProcessorMask); BitSet B(it->ProcessorMask);
// To prevent duplicates, only consider caches where CPU 0 is specified // To prevent duplicates, only consider caches where CPU 0 is specified
if (!B.test(0)) continue; if (!B.test(0))
CInfo* Cache = &it->Cache; continue;
CInfo *Cache = &it->Cache;
CPUInfo::CacheInfo C; CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(B.count()); C.num_sharing = static_cast<int>(B.count());
C.level = Cache->Level; C.level = Cache->Level;
C.size = Cache->Size; C.size = Cache->Size;
switch (Cache->Type) { switch (Cache->Type)
{
case CacheUnified: case CacheUnified:
C.type = "Unified"; C.type = "Unified";
break; break;
@ -363,29 +411,32 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
return res; return res;
} }
#elif BENCHMARK_OS_QNX #elif BENCHMARK_OS_QNX
std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() { std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX()
{
std::vector<CPUInfo::CacheInfo> res; std::vector<CPUInfo::CacheInfo> res;
struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr); struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr); uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ; int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize;
for(int i = 0; i < num; ++i ) { for (int i = 0; i < num; ++i)
{
CPUInfo::CacheInfo info; CPUInfo::CacheInfo info;
switch (cache->flags){ switch (cache->flags)
case CACHE_FLAG_INSTR : {
case CACHE_FLAG_INSTR:
info.type = "Instruction"; info.type = "Instruction";
info.level = 1; info.level = 1;
break; break;
case CACHE_FLAG_DATA : case CACHE_FLAG_DATA:
info.type = "Data"; info.type = "Data";
info.level = 1; info.level = 1;
break; break;
case CACHE_FLAG_UNIFIED : case CACHE_FLAG_UNIFIED:
info.type = "Unified"; info.type = "Unified";
info.level = 2; info.level = 2;
case CACHE_FLAG_SHARED : case CACHE_FLAG_SHARED:
info.type = "Shared"; info.type = "Shared";
info.level = 3; info.level = 3;
default : default:
continue; continue;
break; break;
} }
@ -398,7 +449,8 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
} }
#endif #endif
std::vector<CPUInfo::CacheInfo> GetCacheSizes() { std::vector<CPUInfo::CacheInfo> GetCacheSizes()
{
#ifdef BENCHMARK_OS_MACOSX #ifdef BENCHMARK_OS_MACOSX
return GetCacheSizesMacOSX(); return GetCacheSizesMacOSX();
#elif defined(BENCHMARK_OS_WINDOWS) #elif defined(BENCHMARK_OS_WINDOWS)
@ -410,10 +462,11 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
#endif #endif
} }
std::string GetSystemName() { std::string GetSystemName()
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
std::string str; std::string str;
const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1; const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
TCHAR hostname[COUNT] = {'\0'}; TCHAR hostname[COUNT] = {'\0'};
DWORD DWCOUNT = COUNT; DWORD DWCOUNT = COUNT;
if (!GetComputerName(hostname, &DWCOUNT)) if (!GetComputerName(hostname, &DWCOUNT))
@ -421,7 +474,7 @@ std::string GetSystemName() {
#ifndef UNICODE #ifndef UNICODE
str = std::string(hostname, DWCOUNT); str = std::string(hostname, DWCOUNT);
#else #else
// Using wstring_convert, which is deprecated in C++17 // Using wstring_convert, which is deprecated in C++17
using convert_type = std::codecvt_utf8<wchar_t>; using convert_type = std::codecvt_utf8<wchar_t>;
std::wstring_convert<convert_type, wchar_t> converter; std::wstring_convert<convert_type, wchar_t> converter;
std::wstring wStr(hostname, DWCOUNT); std::wstring wStr(hostname, DWCOUNT);
@ -445,15 +498,18 @@ std::string GetSystemName() {
#endif // def HOST_NAME_MAX #endif // def HOST_NAME_MAX
char hostname[HOST_NAME_MAX]; char hostname[HOST_NAME_MAX];
int retVal = gethostname(hostname, HOST_NAME_MAX); int retVal = gethostname(hostname, HOST_NAME_MAX);
if (retVal != 0) return std::string(""); if (retVal != 0)
return std::string("");
return std::string(hostname); return std::string(hostname);
#endif // Catch-all POSIX block. #endif // Catch-all POSIX block.
} }
int GetNumCPUs() { int GetNumCPUs()
{
#ifdef BENCHMARK_HAS_SYSCTL #ifdef BENCHMARK_HAS_SYSCTL
int NumCPU = -1; int NumCPU = -1;
if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU; if (GetSysctl("hw.ncpu", &NumCPU))
return NumCPU;
fprintf(stderr, "Err: %s\n", strerror(errno)); fprintf(stderr, "Err: %s\n", strerror(errno));
std::exit(EXIT_FAILURE); std::exit(EXIT_FAILURE);
#elif defined(BENCHMARK_OS_WINDOWS) #elif defined(BENCHMARK_OS_WINDOWS)
@ -468,10 +524,9 @@ int GetNumCPUs() {
#elif defined(BENCHMARK_OS_SOLARIS) #elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure. // Returns -1 in case of a failure.
int NumCPU = sysconf(_SC_NPROCESSORS_ONLN); int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
if (NumCPU < 0) { if (NumCPU < 0)
fprintf(stderr, {
"sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", strerror(errno));
strerror(errno));
} }
return NumCPU; return NumCPU;
#elif defined(BENCHMARK_OS_QNX) #elif defined(BENCHMARK_OS_QNX)
@ -480,44 +535,53 @@ int GetNumCPUs() {
int NumCPUs = 0; int NumCPUs = 0;
int MaxID = -1; int MaxID = -1;
std::ifstream f("/proc/cpuinfo"); std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) { if (!f.is_open())
{
std::cerr << "failed to open /proc/cpuinfo\n"; std::cerr << "failed to open /proc/cpuinfo\n";
return -1; return -1;
} }
const std::string Key = "processor"; const std::string Key = "processor";
std::string ln; std::string ln;
while (std::getline(f, ln)) { while (std::getline(f, ln))
if (ln.empty()) continue; {
if (ln.empty())
continue;
size_t SplitIdx = ln.find(':'); size_t SplitIdx = ln.find(':');
std::string value; std::string value;
#if defined(__s390__) #if defined(__s390__)
// s390 has another format in /proc/cpuinfo // s390 has another format in /proc/cpuinfo
// it needs to be parsed differently // it needs to be parsed differently
if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1); if (SplitIdx != std::string::npos)
value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1);
#else #else
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); if (SplitIdx != std::string::npos)
value = ln.substr(SplitIdx + 1);
#endif #endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0)
{
NumCPUs++; NumCPUs++;
if (!value.empty()) { if (!value.empty())
{
int CurID = benchmark::stoi(value); int CurID = benchmark::stoi(value);
MaxID = std::max(CurID, MaxID); MaxID = std::max(CurID, MaxID);
} }
} }
} }
if (f.bad()) { if (f.bad())
{
std::cerr << "Failure reading /proc/cpuinfo\n"; std::cerr << "Failure reading /proc/cpuinfo\n";
return -1; return -1;
} }
if (!f.eof()) { if (!f.eof())
{
std::cerr << "Failed to read to end of /proc/cpuinfo\n"; std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return -1; return -1;
} }
f.close(); f.close();
if ((MaxID + 1) != NumCPUs) { if ((MaxID + 1) != NumCPUs)
fprintf(stderr, {
"CPU ID assignments in /proc/cpuinfo seem messed up." fprintf(stderr, "CPU ID assignments in /proc/cpuinfo seem messed up."
" This is usually caused by a bad BIOS.\n"); " This is usually caused by a bad BIOS.\n");
} }
return NumCPUs; return NumCPUs;
@ -525,7 +589,8 @@ int GetNumCPUs() {
BENCHMARK_UNREACHABLE(); BENCHMARK_UNREACHABLE();
} }
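
On Linux the code above counts "processor" lines in /proc/cpuinfo and cross-checks the highest reported ID against that count. A hypothetical parser over an in-memory sample that uses the same prefix match and colon split:

#include <algorithm>
#include <cassert>
#include <sstream>
#include <string>

int main()
{
    // Heavily trimmed, hypothetical two-CPU /proc/cpuinfo excerpt.
    std::istringstream f("processor : 0\nmodel name : demo cpu\n\n"
                         "processor : 1\nmodel name : demo cpu\n");
    const std::string key = "processor";
    int num_cpus = 0;
    int max_id = -1;
    std::string ln;
    while (std::getline(f, ln))
    {
        if (ln.empty())
            continue;
        const std::size_t split_idx = ln.find(':');
        std::string value;
        if (split_idx != std::string::npos)
            value = ln.substr(split_idx + 1);
        if (ln.size() >= key.size() && ln.compare(0, key.size(), key) == 0)
        {
            ++num_cpus; // one "processor" line per CPU
            if (!value.empty())
                max_id = std::max(max_id, std::stoi(value));
        }
    }
    assert(num_cpus == 2);
    assert(max_id + 1 == num_cpus); // IDs should be dense, as the warning above checks
    return 0;
}
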
double GetCPUCyclesPerSecond() { double GetCPUCyclesPerSecond()
{
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN #if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
long freq; long freq;
@ -538,8 +603,8 @@ double GetCPUCyclesPerSecond() {
if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq) if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
// If CPU scaling is in effect, we want to use the *maximum* frequency, // If CPU scaling is in effect, we want to use the *maximum* frequency,
// not whatever CPU speed some random processor happens to be using now. // not whatever CPU speed some random processor happens to be using now.
|| ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &freq))
&freq)) { {
// The value is in kHz (as the file name suggests). For example, on a // The value is in kHz (as the file name suggests). For example, on a
// 2GHz warpstation, the file contains the value "2000000". // 2GHz warpstation, the file contains the value "2000000".
return freq * 1000.0; return freq * 1000.0;
@ -549,45 +614,57 @@ double GetCPUCyclesPerSecond() {
double bogo_clock = error_value; double bogo_clock = error_value;
std::ifstream f("/proc/cpuinfo"); std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) { if (!f.is_open())
{
std::cerr << "failed to open /proc/cpuinfo\n"; std::cerr << "failed to open /proc/cpuinfo\n";
return error_value; return error_value;
} }
auto startsWithKey = [](std::string const& Value, std::string const& Key) { auto startsWithKey = [](std::string const &Value, std::string const &Key) {
if (Key.size() > Value.size()) return false; if (Key.size() > Value.size())
auto Cmp = [&](char X, char Y) { return false;
return std::tolower(X) == std::tolower(Y); auto Cmp = [&](char X, char Y) { return std::tolower(X) == std::tolower(Y); };
};
return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp); return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
}; };
std::string ln; std::string ln;
while (std::getline(f, ln)) { while (std::getline(f, ln))
if (ln.empty()) continue; {
if (ln.empty())
continue;
size_t SplitIdx = ln.find(':'); size_t SplitIdx = ln.find(':');
std::string value; std::string value;
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); if (SplitIdx != std::string::npos)
value = ln.substr(SplitIdx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero, // accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init. // which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz")) { if (startsWithKey(ln, "cpu MHz"))
if (!value.empty()) { {
if (!value.empty())
{
double cycles_per_second = benchmark::stod(value) * 1000000.0; double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0) return cycles_per_second; if (cycles_per_second > 0)
return cycles_per_second;
} }
} else if (startsWithKey(ln, "bogomips")) { }
if (!value.empty()) { else if (startsWithKey(ln, "bogomips"))
{
if (!value.empty())
{
bogo_clock = benchmark::stod(value) * 1000000.0; bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0) bogo_clock = error_value; if (bogo_clock < 0.0)
bogo_clock = error_value;
} }
} }
} }
if (f.bad()) { if (f.bad())
{
std::cerr << "Failure reading /proc/cpuinfo\n"; std::cerr << "Failure reading /proc/cpuinfo\n";
return error_value; return error_value;
} }
if (!f.eof()) { if (!f.eof())
{
std::cerr << "Failed to read to end of /proc/cpuinfo\n"; std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return error_value; return error_value;
} }
@ -595,10 +672,11 @@ double GetCPUCyclesPerSecond() {
// If we found the bogomips clock, but nothing better, we'll use it (but // If we found the bogomips clock, but nothing better, we'll use it (but
// we're not happy about it); otherwise, fallback to the rough estimation // we're not happy about it); otherwise, fallback to the rough estimation
// below. // below.
if (bogo_clock >= 0.0) return bogo_clock; if (bogo_clock >= 0.0)
return bogo_clock;
#elif defined BENCHMARK_HAS_SYSCTL #elif defined BENCHMARK_HAS_SYSCTL
constexpr auto* FreqStr = constexpr auto *FreqStr =
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) #if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
"machdep.tsc_freq"; "machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD #elif defined BENCHMARK_OS_OPENBSD
@ -608,56 +686,56 @@ double GetCPUCyclesPerSecond() {
#endif #endif
unsigned long long hz = 0; unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD #if defined BENCHMARK_OS_OPENBSD
if (GetSysctl(FreqStr, &hz)) return hz * 1000000; if (GetSysctl(FreqStr, &hz))
return hz * 1000000;
#else #else
if (GetSysctl(FreqStr, &hz)) return hz; if (GetSysctl(FreqStr, &hz))
return hz;
#endif #endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", FreqStr, strerror(errno));
FreqStr, strerror(errno));
#elif defined BENCHMARK_OS_WINDOWS #elif defined BENCHMARK_OS_WINDOWS
// In NT, read MHz from the registry. If we fail to do so or we're in win9x // In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate. // then make a crude estimate.
DWORD data, data_size = sizeof(data); DWORD data, data_size = sizeof(data);
if (IsWindowsXPOrGreater() && if (IsWindowsXPOrGreater() &&
SUCCEEDED( SUCCEEDED(SHGetValueA(HKEY_LOCAL_MACHINE, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", "~MHz", nullptr,
SHGetValueA(HKEY_LOCAL_MACHINE, &data, &data_size)))
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", return static_cast<double>((int64_t)data * (int64_t)(1000 * 1000)); // was mhz
"~MHz", nullptr, &data, &data_size))) #elif defined(BENCHMARK_OS_SOLARIS)
return static_cast<double>((int64_t)data *
(int64_t)(1000 * 1000)); // was mhz
#elif defined (BENCHMARK_OS_SOLARIS)
kstat_ctl_t *kc = kstat_open(); kstat_ctl_t *kc = kstat_open();
if (!kc) { if (!kc)
{
std::cerr << "failed to open /dev/kstat\n"; std::cerr << "failed to open /dev/kstat\n";
return -1; return -1;
} }
kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); kstat_t *ksp = kstat_lookup(kc, (char *)"cpu_info", -1, (char *)"cpu_info0");
if (!ksp) { if (!ksp)
{
std::cerr << "failed to lookup in /dev/kstat\n"; std::cerr << "failed to lookup in /dev/kstat\n";
return -1; return -1;
} }
if (kstat_read(kc, ksp, NULL) < 0) { if (kstat_read(kc, ksp, NULL) < 0)
{
std::cerr << "failed to read from /dev/kstat\n"; std::cerr << "failed to read from /dev/kstat\n";
return -1; return -1;
} }
kstat_named_t *knp = kstat_named_t *knp = (kstat_named_t *)kstat_data_lookup(ksp, (char *)"current_clock_Hz");
(kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz"); if (!knp)
if (!knp) { {
std::cerr << "failed to lookup data in /dev/kstat\n"; std::cerr << "failed to lookup data in /dev/kstat\n";
return -1; return -1;
} }
if (knp->data_type != KSTAT_DATA_UINT64) { if (knp->data_type != KSTAT_DATA_UINT64)
std::cerr << "current_clock_Hz is of unexpected data type: " {
<< knp->data_type << "\n"; std::cerr << "current_clock_Hz is of unexpected data type: " << knp->data_type << "\n";
return -1; return -1;
} }
double clock_hz = knp->value.ui64; double clock_hz = knp->value.ui64;
kstat_close(kc); kstat_close(kc);
return clock_hz; return clock_hz;
#elif defined (BENCHMARK_OS_QNX) #elif defined(BENCHMARK_OS_QNX)
return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * (int64_t)(1000 * 1000));
(int64_t)(1000 * 1000));
#endif #endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate. // If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000; const int estimate_time_ms = 1000;
@ -666,16 +744,20 @@ double GetCPUCyclesPerSecond() {
return static_cast<double>(cycleclock::Now() - start_ticks); return static_cast<double>(cycleclock::Now() - start_ticks);
} }
std::vector<double> GetLoadAvg() { std::vector<double> GetLoadAvg()
#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \ {
defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \ #if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__) defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD) && \
!defined(__ANDROID__)
constexpr int kMaxSamples = 3; constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0); std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples); const int nelem = getloadavg(res.data(), kMaxSamples);
if (nelem < 1) { if (nelem < 1)
{
res.clear(); res.clear();
} else { }
else
{
res.resize(nelem); res.resize(nelem);
} }
return res; return res;
@ -686,23 +768,25 @@ std::vector<double> GetLoadAvg() {
} // end namespace } // end namespace
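
GetLoadAvg above forwards to getloadavg(3) where available and trims the result to however many samples the call actually produced. A hypothetical direct use of the same libc call, assuming a BSD or glibc system:

#include <cstdio>
#include <cstdlib> // getloadavg on glibc and the BSDs
#include <vector>

int main()
{
    std::vector<double> res(3, 0.0);
    const int nelem = getloadavg(res.data(), 3); // may fill fewer than 3 samples, or return -1
    if (nelem < 1)
        res.clear();                             // unobtainable: report an empty vector
    else
        res.resize(nelem);
    for (double v : res)
        std::printf("%.2f ", v);                 // 1-, 5- and 15-minute load averages
    std::printf("\n");
    return 0;
}
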
const CPUInfo& CPUInfo::Get() { const CPUInfo &CPUInfo::Get()
static const CPUInfo* info = new CPUInfo(); {
static const CPUInfo *info = new CPUInfo();
return *info; return *info;
} }
CPUInfo::CPUInfo() CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()), : num_cpus(GetNumCPUs()), cycles_per_second(GetCPUCyclesPerSecond()), caches(GetCacheSizes()),
cycles_per_second(GetCPUCyclesPerSecond()), scaling_enabled(CpuScalingEnabled(num_cpus)), load_avg(GetLoadAvg())
caches(GetCacheSizes()), {
scaling_enabled(CpuScalingEnabled(num_cpus)), }
load_avg(GetLoadAvg()) {}
const SystemInfo &SystemInfo::Get()
const SystemInfo& SystemInfo::Get() { {
static const SystemInfo* info = new SystemInfo(); static const SystemInfo *info = new SystemInfo();
return *info; return *info;
} }
SystemInfo::SystemInfo() : name(GetSystemName()) {} SystemInfo::SystemInfo() : name(GetSystemName())
{
}
} // end namespace benchmark } // end namespace benchmark


@ -6,38 +6,47 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "mutex.h" #include "mutex.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
class ThreadManager { class ThreadManager
{
public: public:
explicit ThreadManager(int num_threads) explicit ThreadManager(int num_threads) : alive_threads_(num_threads), start_stop_barrier_(num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {} {
}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { Mutex &GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_)
{
return benchmark_mutex_; return benchmark_mutex_;
} }
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) { bool StartStopBarrier() EXCLUDES(end_cond_mutex_)
{
return start_stop_barrier_.wait(); return start_stop_barrier_.wait();
} }
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) { void NotifyThreadComplete() EXCLUDES(end_cond_mutex_)
{
start_stop_barrier_.removeThread(); start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) { if (--alive_threads_ == 0)
{
MutexLock lock(end_cond_mutex_); MutexLock lock(end_cond_mutex_);
end_condition_.notify_all(); end_condition_.notify_all();
} }
} }
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) { void WaitForAllThreads() EXCLUDES(end_cond_mutex_)
{
MutexLock lock(end_cond_mutex_); MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(), end_condition_.wait(lock.native_handle(), [this]() { return alive_threads_ == 0; });
[this]() { return alive_threads_ == 0; });
} }
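
NotifyThreadComplete and WaitForAllThreads form a countdown: every worker decrements alive_threads_, the last one signals the condition variable, and the waiter's predicate re-check makes the hand-off race-free. A reduced standalone sketch of that pattern using std::mutex and std::condition_variable instead of the library's Mutex wrappers:

#include <atomic>
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

int main()
{
    const int num_threads = 4;
    std::atomic<int> alive(num_threads);
    std::mutex end_mutex;
    std::condition_variable end_condition;

    auto worker = [&]() {
        // ... per-thread benchmark body would run here ...
        if (--alive == 0) // last thread out signals completion
        {
            std::lock_guard<std::mutex> lock(end_mutex);
            end_condition.notify_all();
        }
    };

    std::vector<std::thread> threads;
    for (int i = 0; i < num_threads; ++i)
        threads.emplace_back(worker);

    {
        std::unique_lock<std::mutex> lock(end_mutex);
        end_condition.wait(lock, [&]() { return alive.load() == 0; });
    }
    for (auto &t : threads)
        t.join();
    assert(alive.load() == 0);
    return 0;
}
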
public: public:
struct Result { struct Result
{
IterationCount iterations = 0; IterationCount iterations = 0;
double real_time_used = 0; double real_time_used = 0;
double cpu_time_used = 0; double cpu_time_used = 0;


@ -4,65 +4,83 @@
#include "check.h" #include "check.h"
#include "timers.h" #include "timers.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace internal
{
class ThreadTimer { class ThreadTimer
explicit ThreadTimer(bool measure_process_cpu_time_) {
: measure_process_cpu_time(measure_process_cpu_time_) {} explicit ThreadTimer(bool measure_process_cpu_time_) : measure_process_cpu_time(measure_process_cpu_time_)
{
}
public: public:
static ThreadTimer Create() { static ThreadTimer Create()
{
return ThreadTimer(/*measure_process_cpu_time_=*/false); return ThreadTimer(/*measure_process_cpu_time_=*/false);
} }
static ThreadTimer CreateProcessCpuTime() { static ThreadTimer CreateProcessCpuTime()
{
return ThreadTimer(/*measure_process_cpu_time_=*/true); return ThreadTimer(/*measure_process_cpu_time_=*/true);
} }
// Called by each thread // Called by each thread
void StartTimer() { void StartTimer()
{
running_ = true; running_ = true;
start_real_time_ = ChronoClockNow(); start_real_time_ = ChronoClockNow();
start_cpu_time_ = ReadCpuTimerOfChoice(); start_cpu_time_ = ReadCpuTimerOfChoice();
} }
// Called by each thread // Called by each thread
void StopTimer() { void StopTimer()
{
CHECK(running_); CHECK(running_);
running_ = false; running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_; real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative // Floating point error can result in the subtraction producing a negative
// time. Guard against that. // time. Guard against that.
cpu_time_used_ += cpu_time_used_ += std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
} }
// Called by each thread // Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; } void SetIterationTime(double seconds)
{
manual_time_used_ += seconds;
}
bool running() const { return running_; } bool running() const
{
return running_;
}
// REQUIRES: timer is not running // REQUIRES: timer is not running
double real_time_used() const { double real_time_used() const
{
CHECK(!running_); CHECK(!running_);
return real_time_used_; return real_time_used_;
} }
// REQUIRES: timer is not running // REQUIRES: timer is not running
double cpu_time_used() const { double cpu_time_used() const
{
CHECK(!running_); CHECK(!running_);
return cpu_time_used_; return cpu_time_used_;
} }
// REQUIRES: timer is not running // REQUIRES: timer is not running
double manual_time_used() const { double manual_time_used() const
{
CHECK(!running_); CHECK(!running_);
return manual_time_used_; return manual_time_used_;
} }
private: private:
double ReadCpuTimerOfChoice() const { double ReadCpuTimerOfChoice() const
if (measure_process_cpu_time) return ProcessCPUUsage(); {
if (measure_process_cpu_time)
return ProcessCPUUsage();
return ThreadCPUUsage(); return ThreadCPUUsage();
} }


@ -57,64 +57,65 @@
#include "sleep.h" #include "sleep.h"
#include "string_util.h" #include "string_util.h"
namespace benchmark { namespace benchmark
{
// Suppress unused warnings on helper functions. // Suppress unused warnings on helper functions.
#if defined(__GNUC__) #if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function" #pragma GCC diagnostic ignored "-Wunused-function"
#endif #endif
namespace { namespace
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { double MakeTime(FILETIME const &kernel_time, FILETIME const &user_time)
{
ULARGE_INTEGER kernel; ULARGE_INTEGER kernel;
ULARGE_INTEGER user; ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime; kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime; kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime; user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime; user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) + return (static_cast<double>(kernel.QuadPart) + static_cast<double>(user.QuadPart)) * 1e-7;
static_cast<double>(user.QuadPart)) *
1e-7;
} }
#elif !defined(BENCHMARK_OS_FUCHSIA) #elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const& ru) { double MakeTime(struct rusage const &ru)
return (static_cast<double>(ru.ru_utime.tv_sec) + {
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 + return (static_cast<double>(ru.ru_utime.tv_sec) + static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
} }
#endif #endif
#if defined(BENCHMARK_OS_MACOSX) #if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const& info) { double MakeTime(thread_basic_info_data_t const &info)
return (static_cast<double>(info.user_time.seconds) + {
static_cast<double>(info.user_time.microseconds) * 1e-6 + return (static_cast<double>(info.user_time.seconds) + static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.microseconds) * 1e-6);
static_cast<double>(info.system_time.microseconds) * 1e-6);
} }
#endif #endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) #if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) { double MakeTime(struct timespec const &ts)
{
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9); return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
} }
#endif #endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) { BENCHMARK_NORETURN static void DiagnoseAndExit(const char *msg)
{
std::cerr << "ERROR: " << msg << std::endl; std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE); std::exit(EXIT_FAILURE);
} }
} // end namespace } // end namespace
double ProcessCPUUsage() { double ProcessCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess(); HANDLE proc = GetCurrentProcess();
FILETIME creation_time; FILETIME creation_time;
FILETIME exit_time; FILETIME exit_time;
FILETIME kernel_time; FILETIME kernel_time;
FILETIME user_time; FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, &user_time))
&user_time))
return MakeTime(kernel_time, user_time); return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProcessTimes() failed"); DiagnoseAndExit("GetProcessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN) #elif defined(BENCHMARK_OS_EMSCRIPTEN)
@ -132,20 +133,21 @@ double ProcessCPUUsage() {
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else #else
struct rusage ru; struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); if (getrusage(RUSAGE_SELF, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed"); DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif #endif
} }
double ThreadCPUUsage() { double ThreadCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread(); HANDLE this_thread = GetCurrentThread();
FILETIME creation_time; FILETIME creation_time;
FILETIME exit_time; FILETIME exit_time;
FILETIME kernel_time; FILETIME kernel_time;
FILETIME user_time; FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, &user_time);
&user_time);
return MakeTime(kernel_time, user_time); return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX) #elif defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
@ -153,8 +155,8 @@ double ThreadCPUUsage() {
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info; thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self()); mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == KERN_SUCCESS)
KERN_SUCCESS) { {
return MakeTime(info); return MakeTime(info);
} }
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
@ -167,36 +169,42 @@ double ThreadCPUUsage() {
return ProcessCPUUsage(); return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS) #elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru; struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru); if (getrusage(RUSAGE_LWP, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed"); DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID) #elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts; struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else #else
#error Per-thread timing is not available on your system. #error Per-thread timing is not available on your system.
#endif #endif
} }
namespace { namespace
{
std::string DateTimeString(bool local) { std::string DateTimeString(bool local)
{
typedef std::chrono::system_clock Clock; typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now()); std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kStorageSize = 128; const std::size_t kStorageSize = 128;
char storage[kStorageSize]; char storage[kStorageSize];
std::size_t written; std::size_t written;
if (local) { if (local)
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
written = written = std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else #else
std::tm timeinfo; std::tm timeinfo;
::localtime_r(&now, &timeinfo); ::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif #endif
} else { }
else
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now)); written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else #else
@ -212,6 +220,9 @@ std::string DateTimeString(bool local) {
} // end namespace } // end namespace
std::string LocalDateTimeString() { return DateTimeString(true); } std::string LocalDateTimeString()
{
return DateTimeString(true);
}
} // end namespace benchmark } // end namespace benchmark


@ -4,7 +4,8 @@
#include <chrono> #include <chrono>
#include <string> #include <string>
namespace benchmark { namespace benchmark
{
// Return the CPU usage of the current process // Return the CPU usage of the current process
double ProcessCPUUsage(); double ProcessCPUUsage();
@ -16,18 +17,19 @@ double ChildrenCPUUsage();
double ThreadCPUUsage(); double ThreadCPUUsage();
#if defined(HAVE_STEADY_CLOCK) #if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady> template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady> struct ChooseSteadyClock
struct ChooseSteadyClock { {
typedef std::chrono::high_resolution_clock type; typedef std::chrono::high_resolution_clock type;
}; };
template <> template <> struct ChooseSteadyClock<false>
struct ChooseSteadyClock<false> { {
typedef std::chrono::steady_clock type; typedef std::chrono::steady_clock type;
}; };
#endif #endif
struct ChooseClockType { struct ChooseClockType
{
#if defined(HAVE_STEADY_CLOCK) #if defined(HAVE_STEADY_CLOCK)
typedef ChooseSteadyClock<>::type type; typedef ChooseSteadyClock<>::type type;
#else #else
@ -35,7 +37,8 @@ struct ChooseClockType {
#endif #endif
}; };
inline double ChronoClockNow() { inline double ChronoClockNow()
{
typedef ChooseClockType::type ClockType; typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>; using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count(); return FpSeconds(ClockType::now().time_since_epoch()).count();
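ChronoClockNow() converts the chosen clock's current time_since_epoch() to seconds as a double, so elapsed wall time is a plain subtraction; combined with ProcessCPUUsage() declared earlier in this header it separates wall time from CPU time. An illustrative caller, not part of this diff (ordinary client code would use the public benchmark::State timing API rather than this internal header):

#include "timers.h" // assumed include for ChronoClockNow()/ProcessCPUUsage()
#include <cstdio>

int main()
{
    double wall_start = benchmark::ChronoClockNow();
    double cpu_start = benchmark::ProcessCPUUsage();
    volatile double sink = 0.0;
    for (int i = 0; i < 1000000; ++i)
        sink += static_cast<double>(i) * 0.5; // some work worth timing
    std::printf("wall: %.6f s, cpu: %.6f s\n", benchmark::ChronoClockNow() - wall_start,
                benchmark::ProcessCPUUsage() - cpu_start);
    return 0;
}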


@ -3,17 +3,22 @@
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) { void BM_empty(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
} }
BENCHMARK(BM_empty); BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu(); BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) { void BM_spin_empty(benchmark::State &state)
for (auto _ : state) { {
for (int x = 0; x < state.range(0); ++x) { for (auto _ : state)
{
for (int x = 0; x < state.range(0); ++x)
{
benchmark::DoNotOptimize(x); benchmark::DoNotOptimize(x);
} }
} }
@ -21,12 +26,16 @@ void BM_spin_empty(benchmark::State& state) {
BASIC_BENCHMARK_TEST(BM_spin_empty); BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu(); BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State& state) { void BM_spin_pause_before(benchmark::State &state)
for (int i = 0; i < state.range(0); ++i) { {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
for (auto _ : state) { for (auto _ : state)
for (int i = 0; i < state.range(0); ++i) { {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
} }
@ -34,14 +43,18 @@ void BM_spin_pause_before(benchmark::State& state) {
BASIC_BENCHMARK_TEST(BM_spin_pause_before); BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) { void BM_spin_pause_during(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
state.PauseTiming(); state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) { for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
state.ResumeTiming(); state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) { for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
} }
@ -49,8 +62,10 @@ void BM_spin_pause_during(benchmark::State& state) {
BASIC_BENCHMARK_TEST(BM_spin_pause_during); BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) { void BM_pause_during(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
state.PauseTiming(); state.PauseTiming();
state.ResumeTiming(); state.ResumeTiming();
} }
@ -60,67 +75,83 @@ BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime(); BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) { void BM_spin_pause_after(benchmark::State &state)
for (auto _ : state) { {
for (int i = 0; i < state.range(0); ++i) { for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
} }
for (int i = 0; i < state.range(0); ++i) { for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
} }
BASIC_BENCHMARK_TEST(BM_spin_pause_after); BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu(); BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State& state) { void BM_spin_pause_before_and_after(benchmark::State &state)
for (int i = 0; i < state.range(0); ++i) { {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
for (auto _ : state) { for (auto _ : state)
for (int i = 0; i < state.range(0); ++i) { {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
} }
for (int i = 0; i < state.range(0); ++i) { for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i); benchmark::DoNotOptimize(i);
} }
} }
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after); BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu(); BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) { void BM_empty_stop_start(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_empty_stop_start); BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
void BM_KeepRunning(benchmark::State& state) { void BM_KeepRunning(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0; benchmark::IterationCount iter_count = 0;
assert(iter_count == state.iterations()); assert(iter_count == state.iterations());
while (state.KeepRunning()) { while (state.KeepRunning())
{
++iter_count; ++iter_count;
} }
assert(iter_count == state.iterations()); assert(iter_count == state.iterations());
} }
BENCHMARK(BM_KeepRunning); BENCHMARK(BM_KeepRunning);
void BM_KeepRunningBatch(benchmark::State& state) { void BM_KeepRunningBatch(benchmark::State &state)
{
// Choose a prime batch size to avoid evenly dividing max_iterations. // Choose a prime batch size to avoid evenly dividing max_iterations.
const benchmark::IterationCount batch_size = 101; const benchmark::IterationCount batch_size = 101;
benchmark::IterationCount iter_count = 0; benchmark::IterationCount iter_count = 0;
while (state.KeepRunningBatch(batch_size)) { while (state.KeepRunningBatch(batch_size))
{
iter_count += batch_size; iter_count += batch_size;
} }
assert(state.iterations() == iter_count); assert(state.iterations() == iter_count);
} }
BENCHMARK(BM_KeepRunningBatch); BENCHMARK(BM_KeepRunningBatch);
void BM_RangedFor(benchmark::State& state) { void BM_RangedFor(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0; benchmark::IterationCount iter_count = 0;
for (auto _ : state) { for (auto _ : state)
{
++iter_count; ++iter_count;
} }
assert(iter_count == state.max_iterations); assert(iter_count == state.max_iterations);
@ -129,8 +160,8 @@ BENCHMARK(BM_RangedFor);
// Ensure that StateIterator provides all the necessary typedefs required to // Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits. // instantiate std::iterator_traits.
static_assert(std::is_same< static_assert(std::is_same<typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename std::iterator_traits<benchmark::State::StateIterator>::value_type, typename benchmark::State::StateIterator::value_type>::value,
typename benchmark::State::StateIterator::value_type>::value, ""); "");
BENCHMARK_MAIN(); BENCHMARK_MAIN();


@ -4,99 +4,115 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace benchmark { namespace benchmark
namespace internal { {
namespace { namespace internal
{
namespace
{
TEST(AddRangeTest, Simple) { TEST(AddRangeTest, Simple)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, 1, 2, 2); AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2)); EXPECT_THAT(dst, testing::ElementsAre(1, 2));
} }
TEST(AddRangeTest, Simple64) { TEST(AddRangeTest, Simple64)
{
std::vector<int64_t> dst; std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2); AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2)); EXPECT_THAT(dst, testing::ElementsAre(1, 2));
} }
TEST(AddRangeTest, Advanced) { TEST(AddRangeTest, Advanced)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, 5, 15, 2); AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
} }
TEST(AddRangeTest, Advanced64) { TEST(AddRangeTest, Advanced64)
{
std::vector<int64_t> dst; std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2); AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15)); EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
} }
TEST(AddRangeTest, FullRange8) { TEST(AddRangeTest, FullRange8)
{
std::vector<int8_t> dst; std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8); AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127)); EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
} }
TEST(AddRangeTest, FullRange64) { TEST(AddRangeTest, FullRange64)
{
std::vector<int64_t> dst; std::vector<int64_t> dst;
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024); AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
EXPECT_THAT( EXPECT_THAT(dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
1099511627776LL, 1125899906842624LL,
1152921504606846976LL, 9223372036854775807LL)); 1152921504606846976LL, 9223372036854775807LL));
} }
TEST(AddRangeTest, NegativeRanges) { TEST(AddRangeTest, NegativeRanges)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, -8, 0, 2); AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0)); EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
} }
TEST(AddRangeTest, StrictlyNegative) { TEST(AddRangeTest, StrictlyNegative)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, -8, -1, 2); AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1)); EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
} }
TEST(AddRangeTest, SymmetricNegativeRanges) { TEST(AddRangeTest, SymmetricNegativeRanges)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, -8, 8, 2); AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8)); EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
} }
TEST(AddRangeTest, SymmetricNegativeRangesOddMult) { TEST(AddRangeTest, SymmetricNegativeRangesOddMult)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, -30, 32, 5); AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32)); EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
} }
TEST(AddRangeTest, NegativeRangesAsymmetric) { TEST(AddRangeTest, NegativeRangesAsymmetric)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, -3, 5, 2); AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5)); EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
} }
TEST(AddRangeTest, NegativeRangesLargeStep) { TEST(AddRangeTest, NegativeRangesLargeStep)
{
// Always include -1, 0, 1 when crossing zero. // Always include -1, 0, 1 when crossing zero.
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, -8, 8, 10); AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8)); EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
} }
TEST(AddRangeTest, ZeroOnlyRange) { TEST(AddRangeTest, ZeroOnlyRange)
{
std::vector<int> dst; std::vector<int> dst;
AddRange(&dst, 0, 0, 2); AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0)); EXPECT_THAT(dst, testing::ElementsAre(0));
} }
TEST(AddRangeTest, NegativeRange64) { TEST(AddRangeTest, NegativeRange64)
{
std::vector<int64_t> dst; std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2); AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4)); EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
} }
TEST(AddRangeTest, NegativeRangePreservesExistingOrder) { TEST(AddRangeTest, NegativeRangePreservesExistingOrder)
{
// If elements already exist in the range, ensure we don't change // If elements already exist in the range, ensure we don't change
// their ordering by adding negative values. // their ordering by adding negative values.
std::vector<int64_t> dst = {1, 2, 3}; std::vector<int64_t> dst = {1, 2, 3};
@ -104,20 +120,20 @@ TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2)); EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
} }
TEST(AddRangeTest, FullNegativeRange64) { TEST(AddRangeTest, FullNegativeRange64)
{
std::vector<int64_t> dst; std::vector<int64_t> dst;
const auto min = std::numeric_limits<int64_t>::min(); const auto min = std::numeric_limits<int64_t>::min();
const auto max = std::numeric_limits<int64_t>::max(); const auto max = std::numeric_limits<int64_t>::max();
AddRange(&dst, min, max, 1024); AddRange(&dst, min, max, 1024);
EXPECT_THAT( EXPECT_THAT(dst, testing::ElementsAreArray(std::vector<int64_t>{
dst, testing::ElementsAreArray(std::vector<int64_t>{ min, -1152921504606846976LL, -1125899906842624LL, -1099511627776LL, -1073741824LL, -1048576LL,
min, -1152921504606846976LL, -1125899906842624LL, -1024LL, -1LL, 0LL, 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
-1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL, 1152921504606846976LL, max}));
1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL,
1125899906842624LL, 1152921504606846976LL, max}));
} }
TEST(AddRangeTest, Simple8) { TEST(AddRangeTest, Simple8)
{
std::vector<int8_t> dst; std::vector<int8_t> dst;
AddRange<int8_t>(&dst, 1, 8, 2); AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8)); EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));


@ -1,30 +1,35 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace { namespace
{
using namespace benchmark; using namespace benchmark;
using namespace benchmark::internal; using namespace benchmark::internal;
TEST(BenchmarkNameTest, Empty) { TEST(BenchmarkNameTest, Empty)
{
const auto name = BenchmarkName(); const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string()); EXPECT_EQ(name.str(), std::string());
} }
TEST(BenchmarkNameTest, FunctionName) { TEST(BenchmarkNameTest, FunctionName)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name"); EXPECT_EQ(name.str(), "function_name");
} }
TEST(BenchmarkNameTest, FunctionNameAndArgs) { TEST(BenchmarkNameTest, FunctionNameAndArgs)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
name.args = "some_args:3/4/5"; name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5"); EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
} }
TEST(BenchmarkNameTest, MinTime) { TEST(BenchmarkNameTest, MinTime)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
name.args = "some_args:3/4"; name.args = "some_args:3/4";
@ -32,7 +37,8 @@ TEST(BenchmarkNameTest, MinTime) {
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s"); EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
} }
TEST(BenchmarkNameTest, Iterations) { TEST(BenchmarkNameTest, Iterations)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
name.min_time = "min_time:3.4s"; name.min_time = "min_time:3.4s";
@ -40,7 +46,8 @@ TEST(BenchmarkNameTest, Iterations) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42"); EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
} }
TEST(BenchmarkNameTest, Repetitions) { TEST(BenchmarkNameTest, Repetitions)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
name.min_time = "min_time:3.4s"; name.min_time = "min_time:3.4s";
@ -48,7 +55,8 @@ TEST(BenchmarkNameTest, Repetitions) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24"); EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
} }
TEST(BenchmarkNameTest, TimeType) { TEST(BenchmarkNameTest, TimeType)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
name.min_time = "min_time:3.4s"; name.min_time = "min_time:3.4s";
@ -56,7 +64,8 @@ TEST(BenchmarkNameTest, TimeType) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time"); EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
} }
TEST(BenchmarkNameTest, Threads) { TEST(BenchmarkNameTest, Threads)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.function_name = "function_name"; name.function_name = "function_name";
name.min_time = "min_time:3.4s"; name.min_time = "min_time:3.4s";
@ -64,7 +73,8 @@ TEST(BenchmarkNameTest, Threads) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256"); EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
} }
TEST(BenchmarkNameTest, TestEmptyFunctionName) { TEST(BenchmarkNameTest, TestEmptyFunctionName)
{
auto name = BenchmarkName(); auto name = BenchmarkName();
name.args = "first:3/second:4"; name.args = "first:3/second:4";
name.threads = "threads:22"; name.threads = "threads:22";


@ -24,15 +24,19 @@
#define BENCHMARK_NOINLINE #define BENCHMARK_NOINLINE
#endif #endif
namespace { namespace
{
int BENCHMARK_NOINLINE Factorial(uint32_t n) { int BENCHMARK_NOINLINE Factorial(uint32_t n)
{
return (n == 1) ? 1 : n * Factorial(n - 1); return (n == 1) ? 1 : n * Factorial(n - 1);
} }
double CalculatePi(int depth) { double CalculatePi(int depth)
{
double pi = 0.0; double pi = 0.0;
for (int i = 0; i < depth; ++i) { for (int i = 0; i < depth; ++i)
{
double numerator = static_cast<double>(((i % 2) * 2) - 1); double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1); double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator; pi += numerator / denominator;
@ -40,20 +44,24 @@ double CalculatePi(int depth) {
return (pi - 1.0) * 4; return (pi - 1.0) * 4;
} }
std::set<int64_t> ConstructRandomSet(int64_t size) { std::set<int64_t> ConstructRandomSet(int64_t size)
{
std::set<int64_t> s; std::set<int64_t> s;
for (int i = 0; i < size; ++i) s.insert(s.end(), i); for (int i = 0; i < size; ++i)
s.insert(s.end(), i);
return s; return s;
} }
std::mutex test_vector_mu; std::mutex test_vector_mu;
std::vector<int>* test_vector = nullptr; std::vector<int> *test_vector = nullptr;
} // end namespace } // end namespace
static void BM_Factorial(benchmark::State& state) { static void BM_Factorial(benchmark::State &state)
{
int fac_42 = 0; int fac_42 = 0;
for (auto _ : state) fac_42 = Factorial(8); for (auto _ : state)
fac_42 = Factorial(8);
// Prevent compiler optimizations // Prevent compiler optimizations
std::stringstream ss; std::stringstream ss;
ss << fac_42; ss << fac_42;
@ -62,18 +70,22 @@ static void BM_Factorial(benchmark::State& state) {
BENCHMARK(BM_Factorial); BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime(); BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) { static void BM_CalculatePiRange(benchmark::State &state)
{
double pi = 0.0; double pi = 0.0;
for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0))); for (auto _ : state)
pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss; std::stringstream ss;
ss << pi; ss << pi;
state.SetLabel(ss.str()); state.SetLabel(ss.str());
} }
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024); BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) { static void BM_CalculatePi(benchmark::State &state)
{
static const int depth = 1024; static const int depth = 1024;
for (auto _ : state) { for (auto _ : state)
{
benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth))); benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
} }
} }
@ -81,13 +93,16 @@ BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32); BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu(); BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) { static void BM_SetInsert(benchmark::State &state)
{
std::set<int64_t> data; std::set<int64_t> data;
for (auto _ : state) { for (auto _ : state)
{
state.PauseTiming(); state.PauseTiming();
data = ConstructRandomSet(state.range(0)); data = ConstructRandomSet(state.range(0));
state.ResumeTiming(); state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand()); for (int j = 0; j < state.range(1); ++j)
data.insert(rand());
} }
state.SetItemsProcessed(state.iterations() * state.range(1)); state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
@ -97,41 +112,47 @@ static void BM_SetInsert(benchmark::State& state) {
// non-timed part of each iteration will make the benchmark take forever. // non-timed part of each iteration will make the benchmark take forever.
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}}); BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
template <typename Container, template <typename Container, typename ValueType = typename Container::value_type>
typename ValueType = typename Container::value_type> static void BM_Sequential(benchmark::State &state)
static void BM_Sequential(benchmark::State& state) { {
ValueType v = 42; ValueType v = 42;
for (auto _ : state) { for (auto _ : state)
{
Container c; Container c;
for (int64_t i = state.range(0); --i;) c.push_back(v); for (int64_t i = state.range(0); --i;)
c.push_back(v);
} }
const int64_t items_processed = state.iterations() * state.range(0); const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed); state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v)); state.SetBytesProcessed(items_processed * sizeof(v));
} }
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int) BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1 << 0, 1 << 10);
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10); BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond. // Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#ifdef BENCHMARK_HAS_CXX11 #ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512); BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif #endif
static void BM_StringCompare(benchmark::State& state) { static void BM_StringCompare(benchmark::State &state)
{
size_t len = static_cast<size_t>(state.range(0)); size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-'); std::string s1(len, '-');
std::string s2(len, '-'); std::string s2(len, '-');
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); for (auto _ : state)
benchmark::DoNotOptimize(s1.compare(s2));
} }
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
static void BM_SetupTeardown(benchmark::State& state) { static void BM_SetupTeardown(benchmark::State &state)
if (state.thread_index == 0) { {
if (state.thread_index == 0)
{
// No need to lock test_vector_mu here as this is running single-threaded. // No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>(); test_vector = new std::vector<int>();
} }
int i = 0; int i = 0;
for (auto _ : state) { for (auto _ : state)
{
std::lock_guard<std::mutex> l(test_vector_mu); std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0) if (i % 2 == 0)
test_vector->push_back(i); test_vector->push_back(i);
@ -139,60 +160,67 @@ static void BM_SetupTeardown(benchmark::State& state) {
test_vector->pop_back(); test_vector->pop_back();
++i; ++i;
} }
if (state.thread_index == 0) { if (state.thread_index == 0)
{
delete test_vector; delete test_vector;
} }
} }
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu(); BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) { static void BM_LongTest(benchmark::State &state)
{
double tracker = 0.0; double tracker = 0.0;
for (auto _ : state) { for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i) for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i); benchmark::DoNotOptimize(tracker += i);
} }
} }
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28); BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) { static void BM_ParallelMemset(benchmark::State &state)
{
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int)); int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads; int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index; int from = thread_size * state.thread_index;
int to = from + thread_size; int to = from + thread_size;
if (state.thread_index == 0) { if (state.thread_index == 0)
{
test_vector = new std::vector<int>(static_cast<size_t>(size)); test_vector = new std::vector<int>(static_cast<size_t>(size));
} }
for (auto _ : state) { for (auto _ : state)
for (int i = from; i < to; i++) { {
for (int i = from; i < to; i++)
{
// No need to lock test_vector_mu as ranges // No need to lock test_vector_mu as ranges
// do not overlap between threads. // do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1); benchmark::DoNotOptimize(test_vector->at(i) = 1);
} }
} }
if (state.thread_index == 0) { if (state.thread_index == 0)
{
delete test_vector; delete test_vector;
} }
} }
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4); BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) { static void BM_ManualTiming(benchmark::State &state)
{
int64_t slept_for = 0; int64_t slept_for = 0;
int64_t microseconds = state.range(0); int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{ std::chrono::duration<double, std::micro> sleep_duration{static_cast<double>(microseconds)};
static_cast<double>(microseconds)};
for (auto _ : state) { for (auto _ : state)
{
auto start = std::chrono::high_resolution_clock::now(); auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep // Simulate some useful workload with a sleep
std::this_thread::sleep_for( std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now(); auto end = std::chrono::high_resolution_clock::now();
auto elapsed = auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count()); state.SetIterationTime(elapsed.count());
slept_for += microseconds; slept_for += microseconds;
@ -204,24 +232,29 @@ BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#ifdef BENCHMARK_HAS_CXX11 #ifdef BENCHMARK_HAS_CXX11
template <class... Args> template <class... Args> void BM_with_args(benchmark::State &state, Args &&...)
void BM_with_args(benchmark::State& state, Args&&...) { {
for (auto _ : state) { for (auto _ : state)
{
} }
} }
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44); BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair<int, double>(42, 3.8));
std::pair<int, double>(42, 3.8));
void BM_non_template_args(benchmark::State& state, int, double) { void BM_non_template_args(benchmark::State &state, int, double)
while(state.KeepRunning()) {} {
while (state.KeepRunning())
{
}
} }
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
#endif // BENCHMARK_HAS_CXX11 #endif // BENCHMARK_HAS_CXX11
static void BM_DenseThreadRanges(benchmark::State& st) { static void BM_DenseThreadRanges(benchmark::State &st)
switch (st.range(0)) { {
switch (st.range(0))
{
case 1: case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3); assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break; break;
@ -229,13 +262,13 @@ static void BM_DenseThreadRanges(benchmark::State& st) {
assert(st.threads == 1 || st.threads == 3 || st.threads == 4); assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break; break;
case 3: case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || st.threads == 14);
st.threads == 14);
break; break;
default: default:
assert(false && "Invalid test case number"); assert(false && "Invalid test case number");
} }
while (st.KeepRunning()) { while (st.KeepRunning())
{
} }
} }
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3); BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);


@ -4,16 +4,17 @@
#pragma clang diagnostic ignored "-Wreturn-type" #pragma clang diagnostic ignored "-Wreturn-type"
#endif #endif
extern "C" { extern "C"
{
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
} }
// CHECK-LABEL: test_basic: // CHECK-LABEL: test_basic:
extern "C" void test_basic() { extern "C" void test_basic()
{
int x; int x;
benchmark::DoNotOptimize(&x); benchmark::DoNotOptimize(&x);
x = 101; x = 101;
@ -24,7 +25,8 @@ extern "C" void test_basic() {
} }
// CHECK-LABEL: test_redundant_store: // CHECK-LABEL: test_redundant_store:
extern "C" void test_redundant_store() { extern "C" void test_redundant_store()
{
ExternInt = 3; ExternInt = 3;
benchmark::ClobberMemory(); benchmark::ClobberMemory();
ExternInt = 51; ExternInt = 51;
@ -34,7 +36,8 @@ extern "C" void test_redundant_store() {
} }
// CHECK-LABEL: test_redundant_read: // CHECK-LABEL: test_redundant_read:
extern "C" void test_redundant_read() { extern "C" void test_redundant_read()
{
int x; int x;
benchmark::DoNotOptimize(&x); benchmark::DoNotOptimize(&x);
x = ExternInt; x = ExternInt;
@ -48,7 +51,8 @@ extern "C" void test_redundant_read() {
} }
// CHECK-LABEL: test_redundant_read2: // CHECK-LABEL: test_redundant_read2:
extern "C" void test_redundant_read2() { extern "C" void test_redundant_read2()
{
int x; int x;
benchmark::DoNotOptimize(&x); benchmark::DoNotOptimize(&x);
x = ExternInt; x = ExternInt;


@ -4,33 +4,41 @@
#include "../src/internal_macros.h" #include "../src/internal_macros.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace benchmark { namespace benchmark
namespace { {
namespace
{
#if defined(BENCHMARK_OS_WINDOWS) #if defined(BENCHMARK_OS_WINDOWS)
int setenv(const char* name, const char* value, int overwrite) { int setenv(const char *name, const char *value, int overwrite)
if (!overwrite) { {
if (!overwrite)
{
// NOTE: getenv_s is far superior but not available under mingw. // NOTE: getenv_s is far superior but not available under mingw.
char* env_value = getenv(name); char *env_value = getenv(name);
if (env_value == nullptr) { if (env_value == nullptr)
{
return -1; return -1;
} }
} }
return _putenv_s(name, value); return _putenv_s(name, value);
} }
int unsetenv(const char* name) { int unsetenv(const char *name)
{
return _putenv_s(name, ""); return _putenv_s(name, "");
} }
#endif // BENCHMARK_OS_WINDOWS #endif // BENCHMARK_OS_WINDOWS
TEST(BoolFromEnv, Default) { TEST(BoolFromEnv, Default)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true); EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
} }
TEST(BoolFromEnv, False) { TEST(BoolFromEnv, False)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false); EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
@ -88,7 +96,8 @@ TEST(BoolFromEnv, False) {
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
} }
TEST(BoolFromEnv, True) { TEST(BoolFromEnv, True)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true); EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
@ -152,46 +161,54 @@ TEST(BoolFromEnv, True) {
#endif #endif
} }
TEST(Int32FromEnv, NotInEnv) { TEST(Int32FromEnv, NotInEnv)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42); EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
} }
TEST(Int32FromEnv, InvalidInteger) { TEST(Int32FromEnv, InvalidInteger)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 42), 42); EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
} }
TEST(Int32FromEnv, ValidInteger) { TEST(Int32FromEnv, ValidInteger)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 64), 42); EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
} }
TEST(DoubleFromEnv, NotInEnv) { TEST(DoubleFromEnv, NotInEnv)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51); EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
} }
TEST(DoubleFromEnv, InvalidReal) { TEST(DoubleFromEnv, InvalidReal)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51); EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
} }
TEST(DoubleFromEnv, ValidReal) { TEST(DoubleFromEnv, ValidReal)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51); EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
} }
TEST(StringFromEnv, Default) { TEST(StringFromEnv, Default)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0); ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo"); EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
} }
TEST(StringFromEnv, Valid) { TEST(StringFromEnv, Valid)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0); ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo"); EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
unsetenv("BENCHMARK_IN_ENV"); unsetenv("BENCHMARK_IN_ENV");
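The tests above set BENCHMARK_IN_ENV and then read back "in_env", i.e. the flag name is upper-cased and prefixed with BENCHMARK_ before the environment lookup, with the default returned when the variable is missing or malformed. A small sketch of how a caller might consult these helpers (the flag names and the include are illustrative assumptions):

#include "commandlineflags.h" // assumed internal header declaring the *FromEnv helpers
#include <cstdint>

void ReadEnvDefaults()
{
    bool color = benchmark::BoolFromEnv("color", true);              // reads BENCHMARK_COLOR
    int32_t repetitions = benchmark::Int32FromEnv("repetitions", 1); // reads BENCHMARK_REPETITIONS
    const char *out_file = benchmark::StringFromEnv("out", "");      // reads BENCHMARK_OUT
    (void)color;
    (void)repetitions;
    (void)out_file;
}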


@ -1,28 +1,26 @@
#undef NDEBUG #undef NDEBUG
#include "benchmark/benchmark.h"
#include "output_test.h"
#include <algorithm> #include <algorithm>
#include <cassert> #include <cassert>
#include <cmath> #include <cmath>
#include <cstdlib> #include <cstdlib>
#include <vector> #include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace { namespace
{
#define ADD_COMPLEXITY_CASES(...) \ #define ADD_COMPLEXITY_CASES(...) int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int AddComplexityTest(std::string test_name, std::string big_o_test_name, int AddComplexityTest(std::string test_name, std::string big_o_test_name, std::string rms_test_name, std::string big_o)
std::string rms_test_name, std::string big_o) { {
SetSubstitutions({{"%name", test_name}, SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name}, {"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name}, {"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o}, {"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o}, {"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}}); {"%rms", "[ ]*[0-9]+ %"}});
AddCases( AddCases(TC_ConsoleOut, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
TC_ConsoleOut,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we didn't only match a name. {"^%bigo_name", MR_Not}, // Assert we didn't only match a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}}); {"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
@ -56,9 +54,12 @@ int AddComplexityTest(std::string test_name, std::string big_o_test_name,
// --------------------------- Testing BigO O(1) --------------------------- // // --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) { void BM_Complexity_O1(benchmark::State &state)
for (auto _ : state) { {
for (int i = 0; i < 1024; ++i) { for (auto _ : state)
{
for (int i = 0; i < 1024; ++i)
{
benchmark::DoNotOptimize(&i); benchmark::DoNotOptimize(&i);
} }
} }
@ -66,9 +67,7 @@ void BM_Complexity_O1(benchmark::State& state) {
} }
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1) BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](benchmark::IterationCount) { return 1.0; });
->Range(1, 1 << 18)
->Complexity([](benchmark::IterationCount) { return 1.0; });
const char *one_test_name = "BM_Complexity_O1"; const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO"; const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
@ -81,53 +80,46 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)"; const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests // Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
enum_big_o_1);
// Add auto enum tests // Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
auto_big_o_1);
// Add lambda tests // Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
lambda_big_o_1);
// ========================================================================= // // ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- // // --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= // // ========================================================================= //
std::vector<int> ConstructRandomVector(int64_t size) { std::vector<int> ConstructRandomVector(int64_t size)
{
std::vector<int> v; std::vector<int> v;
v.reserve(static_cast<int>(size)); v.reserve(static_cast<int>(size));
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i)
{
v.push_back(static_cast<int>(std::rand() % size)); v.push_back(static_cast<int>(std::rand() % size));
} }
return v; return v;
} }
void BM_Complexity_O_N(benchmark::State& state) { void BM_Complexity_O_N(benchmark::State &state)
{
auto v = ConstructRandomVector(state.range(0)); auto v = ConstructRandomVector(state.range(0));
// Test worst case scenario (item not in vector) // Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2; const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) { for (auto _ : state)
{
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
} }
state.SetComplexityN(state.range(0)); state.SetComplexityN(state.range(0));
} }
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N) BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2) ->RangeMultiplier(2)
->Range(1 << 10, 1 << 16) ->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oN); ->Complexity([](benchmark::IterationCount n) -> double { return static_cast<double>(n); });
BENCHMARK(BM_Complexity_O_N) BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) -> double {
return static_cast<double>(n);
});
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
const char *n_test_name = "BM_Complexity_O_N"; const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO"; const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
@ -136,39 +128,31 @@ const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)"; const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests // Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
enum_auto_big_o_n);
// Add lambda tests // Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
lambda_big_o_n);
// ========================================================================= // // ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- // // ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= // // ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) { static void BM_Complexity_O_N_log_N(benchmark::State &state)
{
auto v = ConstructRandomVector(state.range(0)); auto v = ConstructRandomVector(state.range(0));
for (auto _ : state) { for (auto _ : state)
{
std::sort(v.begin(), v.end()); std::sort(v.begin(), v.end());
} }
state.SetComplexityN(state.range(0)); state.SetComplexityN(state.range(0));
} }
static const double kLog2E = 1.44269504088896340736; static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N) BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2) ->RangeMultiplier(2)
->Range(1 << 10, 1 << 16) ->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oNLogN); ->Complexity([](benchmark::IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); });
BENCHMARK(BM_Complexity_O_N_log_N) BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
});
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
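The lambda overload above receives the value previously passed to SetComplexityN and returns the expected growth curve f(N); the predefined enums cover the common fits. A hypothetical quadratic counterpart for this file, reusing ConstructRandomVector but not part of this commit:

static void BM_PairwiseEqual(benchmark::State &state)
{
    auto v = ConstructRandomVector(state.range(0));
    for (auto _ : state)
    {
        int64_t matches = 0;
        for (size_t i = 0; i < v.size(); ++i)
            for (size_t j = 0; j < v.size(); ++j)
                matches += (v[i] == v[j]);
        benchmark::DoNotOptimize(matches);
    }
    state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_PairwiseEqual)->RangeMultiplier(2)->Range(1 << 6, 1 << 10)->Complexity(benchmark::oNSquared);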
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N"; const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
@ -177,37 +161,36 @@ const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)"; const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests // Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
// Add lambda tests // Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
// ========================================================================= // // ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ // // -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= // // ========================================================================= //
void BM_ComplexityCaptureArgs(benchmark::State& state, int n) { void BM_ComplexityCaptureArgs(benchmark::State &state, int n)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
state.SetComplexityN(n); state.SetComplexityN(n);
} }
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100) BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)->Complexity(benchmark::oN)->Ranges({{1, 2}, {3, 4}});
->Complexity(benchmark::oN)
->Ranges({{1, 2}, {3, 4}});
const std::string complexity_capture_name = const std::string complexity_capture_name = "BM_ComplexityCaptureArgs/capture_test";
"BM_ComplexityCaptureArgs/capture_test";
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", complexity_capture_name + "_RMS", "N");
complexity_capture_name + "_RMS", "N");
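BENCHMARK_CAPTURE binds extra arguments (here the literal 100) to the benchmark function at registration time. A minimal hypothetical sketch, independent of the output-test harness; the names and sizes are illustrative only:

#include <benchmark/benchmark.h>
#include <cstddef>
#include <cstring>
#include <vector>

static void BM_memcpy_sized(benchmark::State &state, std::size_t bytes)
{
    std::vector<char> src(bytes, 'x'), dst(bytes);
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(std::memcpy(dst.data(), src.data(), bytes));
    }
}
// Each capture registers a separately named benchmark with its own bound argument.
BENCHMARK_CAPTURE(BM_memcpy_sized, small, 64);
BENCHMARK_CAPTURE(BM_memcpy_sized, large, 1 << 20);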
// ========================================================================= // // ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ // // --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= // // ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); } int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}
View File

@ -12,8 +12,10 @@
#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined. #error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
#endif #endif
void BM_empty(benchmark::State& state) { void BM_empty(benchmark::State &state)
while (state.KeepRunning()) { {
while (state.KeepRunning())
{
volatile benchmark::IterationCount x = state.iterations(); volatile benchmark::IterationCount x = state.iterations();
((void)x); ((void)x);
} }
@ -22,39 +24,43 @@ BENCHMARK(BM_empty);
// The new C++11 interface for args/ranges requires initializer list support. // The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03. // Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State& state) { void BM_old_arg_range_interface(benchmark::State &state)
assert((state.range(0) == 1 && state.range(1) == 2) || {
(state.range(0) == 5 && state.range(1) == 6)); assert((state.range(0) == 1 && state.range(1) == 2) || (state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning()) { while (state.KeepRunning())
{
} }
} }
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6); BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
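For comparison, in a C++11 translation unit the same argument shapes would be registered through the list-based API; a hypothetical sketch, deliberately not valid in this C++03-only file:

static void BM_new_arg_range_interface(benchmark::State &state)
{
    // Same pairs as above, expressed with initializer lists.
    for (auto _ : state)
    {
    }
}
BENCHMARK(BM_new_arg_range_interface)->Args({1, 2})->Ranges({{5, 5}, {6, 6}});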
template <class T, class U> template <class T, class U> void BM_template2(benchmark::State &state)
void BM_template2(benchmark::State& state) { {
BM_empty(state); BM_empty(state);
} }
BENCHMARK_TEMPLATE2(BM_template2, int, long); BENCHMARK_TEMPLATE2(BM_template2, int, long);
template <class T> template <class T> void BM_template1(benchmark::State &state)
void BM_template1(benchmark::State& state) { {
BM_empty(state); BM_empty(state);
} }
BENCHMARK_TEMPLATE(BM_template1, long); BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int); BENCHMARK_TEMPLATE1(BM_template1, int);
template <class T> template <class T> struct BM_Fixture : public ::benchmark::Fixture
struct BM_Fixture : public ::benchmark::Fixture { {
}; };
BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State &state)
{
BM_empty(state); BM_empty(state);
} }
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State &state)
{
BM_empty(state); BM_empty(state);
} }
void BM_counters(benchmark::State& state) { void BM_counters(benchmark::State &state)
{
BM_empty(state); BM_empty(state);
state.counters["Foo"] = 2; state.counters["Foo"] = 2;
} }
View File
@ -17,7 +17,8 @@
#define TEST_HAS_NO_EXCEPTIONS #define TEST_HAS_NO_EXCEPTIONS
#endif #endif
void TestHandler() { void TestHandler()
{
#ifndef TEST_HAS_NO_EXCEPTIONS #ifndef TEST_HAS_NO_EXCEPTIONS
throw std::logic_error(""); throw std::logic_error("");
#else #else
@ -25,55 +26,70 @@ void TestHandler() {
#endif #endif
} }
void try_invalid_pause_resume(benchmark::State& state) { void try_invalid_pause_resume(benchmark::State &state)
{
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS) #if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
try { try
{
state.PauseTiming(); state.PauseTiming();
std::abort(); std::abort();
} catch (std::logic_error const&) {
} }
try { catch (std::logic_error const &)
{
}
try
{
state.ResumeTiming(); state.ResumeTiming();
std::abort(); std::abort();
} catch (std::logic_error const&) { }
catch (std::logic_error const &)
{
} }
#else #else
(void)state; // avoid unused warning (void)state; // avoid unused warning
#endif #endif
} }
void BM_diagnostic_test(benchmark::State& state) { void BM_diagnostic_test(benchmark::State &state)
{
static bool called_once = false; static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state); if (called_once == false)
try_invalid_pause_resume(state);
for (auto _ : state) { for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
if (called_once == false) try_invalid_pause_resume(state); if (called_once == false)
try_invalid_pause_resume(state);
called_once = true; called_once = true;
} }
BENCHMARK(BM_diagnostic_test); BENCHMARK(BM_diagnostic_test);
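The aborts provoked above come from calling PauseTiming()/ResumeTiming() while the timer is not running; inside the measurement loop the pair is valid. A hypothetical sketch of the supported pattern (not part of this commit; needs <vector> and <numeric>):

static void BM_WithUntimedSetup(benchmark::State &state)
{
    for (auto _ : state)
    {
        state.PauseTiming(); // exclude per-iteration setup from the measurement
        std::vector<int> data(1024, 1);
        state.ResumeTiming();
        benchmark::DoNotOptimize(std::accumulate(data.begin(), data.end(), 0));
    }
}
BENCHMARK(BM_WithUntimedSetup);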
void BM_diagnostic_test_keep_running(benchmark::State &state)
void BM_diagnostic_test_keep_running(benchmark::State& state) { {
static bool called_once = false; static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state); if (called_once == false)
try_invalid_pause_resume(state);
while(state.KeepRunning()) { while (state.KeepRunning())
{
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
if (called_once == false) try_invalid_pause_resume(state); if (called_once == false)
try_invalid_pause_resume(state);
called_once = true; called_once = true;
} }
BENCHMARK(BM_diagnostic_test_keep_running); BENCHMARK(BM_diagnostic_test_keep_running);
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
{
benchmark::internal::GetAbortHandler() = &TestHandler; benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv); benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks(); benchmark::RunSpecifiedBenchmarks();
View File
@ -10,22 +10,24 @@
// reporter in the presence of DisplayAggregatesOnly(). // reporter in the presence of DisplayAggregatesOnly().
// We do not care about console output, the normal tests check that already. // We do not care about console output, the normal tests check that already.
void BM_SummaryRepeat(benchmark::State& state) { void BM_SummaryRepeat(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly(); BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
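What the test below verifies, in short: with Repetitions(3) the file reporter still contains the three per-repetition runs plus the _mean, _median and _stddev aggregates, because DisplayAggregatesOnly() only trims the console output. A hypothetical registration contrasting the two knobs (BM_Work is not part of this commit):

static void BM_Work(benchmark::State &state)
{
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(state.iterations());
    }
}
// DisplayAggregatesOnly() hides per-repetition lines on the console only;
// ReportAggregatesOnly() drops them from the file reporters as well.
BENCHMARK(BM_Work)->Repetitions(3)->DisplayAggregatesOnly();
BENCHMARK(BM_Work)->Repetitions(3)->ReportAggregatesOnly();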
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
{
const std::string output = GetFileReporterOutput(argc, argv); const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 || if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != {
1) {
std::cout << "Precondition mismatch. Expected to only find 6 " std::cout << "Precondition mismatch. Expected to only find 6 "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3\", "
View File
@ -4,36 +4,45 @@
#pragma clang diagnostic ignored "-Wreturn-type" #pragma clang diagnostic ignored "-Wreturn-type"
#endif #endif
extern "C" { extern "C"
{
extern int ExternInt; extern int ExternInt;
extern int ExternInt2; extern int ExternInt2;
extern int ExternInt3; extern int ExternInt3;
inline int Add42(int x) { return x + 42; } inline int Add42(int x)
{
return x + 42;
}
struct NotTriviallyCopyable { struct NotTriviallyCopyable
{
NotTriviallyCopyable(); NotTriviallyCopyable();
explicit NotTriviallyCopyable(int x) : value(x) {} explicit NotTriviallyCopyable(int x) : value(x)
NotTriviallyCopyable(NotTriviallyCopyable const&); {
}
NotTriviallyCopyable(NotTriviallyCopyable const &);
int value; int value;
}; };
struct Large { struct Large
{
int value; int value;
int data[2]; int data[2];
}; };
} }
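Outside of this assembly-level test, the same facility is typically used inside a benchmark loop: DoNotOptimize forces its argument to be treated as used so the computation cannot be folded away, and ClobberMemory() additionally acts as a compiler-level write barrier. A hypothetical sketch assuming only the public <benchmark/benchmark.h> API:

#include <benchmark/benchmark.h>
#include <numeric>
#include <vector>

static void BM_Accumulate(benchmark::State &state)
{
    std::vector<int> v(1024, 1);
    for (auto _ : state)
    {
        int sum = std::accumulate(v.begin(), v.end(), 0);
        benchmark::DoNotOptimize(sum); // keep the result observably "live"
        benchmark::ClobberMemory();    // force pending writes to memory
    }
}
BENCHMARK(BM_Accumulate);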
// CHECK-LABEL: test_with_rvalue: // CHECK-LABEL: test_with_rvalue:
extern "C" void test_with_rvalue() { extern "C" void test_with_rvalue()
{
benchmark::DoNotOptimize(Add42(0)); benchmark::DoNotOptimize(Add42(0));
// CHECK: movl $42, %eax // CHECK: movl $42, %eax
// CHECK: ret // CHECK: ret
} }
// CHECK-LABEL: test_with_large_rvalue: // CHECK-LABEL: test_with_large_rvalue:
extern "C" void test_with_large_rvalue() { extern "C" void test_with_large_rvalue()
{
benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}}); benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
// CHECK: ExternInt(%rip) // CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]] // CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
@ -43,14 +52,16 @@ extern "C" void test_with_large_rvalue() {
} }
// CHECK-LABEL: test_with_non_trivial_rvalue: // CHECK-LABEL: test_with_non_trivial_rvalue:
extern "C" void test_with_non_trivial_rvalue() { extern "C" void test_with_non_trivial_rvalue()
{
benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt)); benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
// CHECK: mov{{l|q}} ExternInt(%rip) // CHECK: mov{{l|q}} ExternInt(%rip)
// CHECK: ret // CHECK: ret
} }
// CHECK-LABEL: test_with_lvalue: // CHECK-LABEL: test_with_lvalue:
extern "C" void test_with_lvalue() { extern "C" void test_with_lvalue()
{
int x = 101; int x = 101;
benchmark::DoNotOptimize(x); benchmark::DoNotOptimize(x);
// CHECK-GNU: movl $101, %eax // CHECK-GNU: movl $101, %eax
@ -59,7 +70,8 @@ extern "C" void test_with_lvalue() {
} }
// CHECK-LABEL: test_with_large_lvalue: // CHECK-LABEL: test_with_large_lvalue:
extern "C" void test_with_large_lvalue() { extern "C" void test_with_large_lvalue()
{
Large L{ExternInt, {ExternInt, ExternInt}}; Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L); benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip) // CHECK: ExternInt(%rip)
@ -70,7 +82,8 @@ extern "C" void test_with_large_lvalue() {
} }
// CHECK-LABEL: test_with_non_trivial_lvalue: // CHECK-LABEL: test_with_non_trivial_lvalue:
extern "C" void test_with_non_trivial_lvalue() { extern "C" void test_with_non_trivial_lvalue()
{
NotTriviallyCopyable NTC(ExternInt); NotTriviallyCopyable NTC(ExternInt);
benchmark::DoNotOptimize(NTC); benchmark::DoNotOptimize(NTC);
// CHECK: ExternInt(%rip) // CHECK: ExternInt(%rip)
@ -79,7 +92,8 @@ extern "C" void test_with_non_trivial_lvalue() {
} }
// CHECK-LABEL: test_with_const_lvalue: // CHECK-LABEL: test_with_const_lvalue:
extern "C" void test_with_const_lvalue() { extern "C" void test_with_const_lvalue()
{
const int x = 123; const int x = 123;
benchmark::DoNotOptimize(x); benchmark::DoNotOptimize(x);
// CHECK: movl $123, %eax // CHECK: movl $123, %eax
@ -87,7 +101,8 @@ extern "C" void test_with_const_lvalue() {
} }
// CHECK-LABEL: test_with_large_const_lvalue: // CHECK-LABEL: test_with_large_const_lvalue:
extern "C" void test_with_large_const_lvalue() { extern "C" void test_with_large_const_lvalue()
{
const Large L{ExternInt, {ExternInt, ExternInt}}; const Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L); benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip) // CHECK: ExternInt(%rip)
@ -98,7 +113,8 @@ extern "C" void test_with_large_const_lvalue() {
} }
// CHECK-LABEL: test_with_non_trivial_const_lvalue: // CHECK-LABEL: test_with_non_trivial_const_lvalue:
extern "C" void test_with_non_trivial_const_lvalue() { extern "C" void test_with_non_trivial_const_lvalue()
{
const NotTriviallyCopyable Obj(ExternInt); const NotTriviallyCopyable Obj(ExternInt);
benchmark::DoNotOptimize(Obj); benchmark::DoNotOptimize(Obj);
// CHECK: mov{{q|l}} ExternInt(%rip) // CHECK: mov{{q|l}} ExternInt(%rip)
@ -106,7 +122,8 @@ extern "C" void test_with_non_trivial_const_lvalue() {
} }
// CHECK-LABEL: test_div_by_two: // CHECK-LABEL: test_div_by_two:
extern "C" int test_div_by_two(int input) { extern "C" int test_div_by_two(int input)
{
int divisor = 2; int divisor = 2;
benchmark::DoNotOptimize(divisor); benchmark::DoNotOptimize(divisor);
return input / divisor; return input / divisor;
@ -116,9 +133,10 @@ extern "C" int test_div_by_two(int input) {
} }
// CHECK-LABEL: test_inc_integer: // CHECK-LABEL: test_inc_integer:
extern "C" int test_inc_integer() { extern "C" int test_inc_integer()
{
int x = 0; int x = 0;
for (int i=0; i < 5; ++i) for (int i = 0; i < 5; ++i)
benchmark::DoNotOptimize(++x); benchmark::DoNotOptimize(++x);
// CHECK: movl $1, [[DEST:.*]] // CHECK: movl $1, [[DEST:.*]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]] // CHECK: {{(addl \$1,|incl)}} [[DEST]]
@ -131,7 +149,8 @@ extern "C" int test_inc_integer() {
} }
// CHECK-LABEL: test_pointer_rvalue // CHECK-LABEL: test_pointer_rvalue
extern "C" void test_pointer_rvalue() { extern "C" void test_pointer_rvalue()
{
// CHECK: movl $42, [[DEST:.*]] // CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax // CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
@ -141,18 +160,20 @@ extern "C" void test_pointer_rvalue() {
} }
// CHECK-LABEL: test_pointer_const_lvalue: // CHECK-LABEL: test_pointer_const_lvalue:
extern "C" void test_pointer_const_lvalue() { extern "C" void test_pointer_const_lvalue()
{
// CHECK: movl $42, [[DEST:.*]] // CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax // CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret // CHECK: ret
int x = 42; int x = 42;
int * const xp = &x; int *const xp = &x;
benchmark::DoNotOptimize(xp); benchmark::DoNotOptimize(xp);
} }
// CHECK-LABEL: test_pointer_lvalue: // CHECK-LABEL: test_pointer_lvalue:
extern "C" void test_pointer_lvalue() { extern "C" void test_pointer_lvalue()
{
// CHECK: movl $42, [[DEST:.*]] // CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax // CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]]) // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
View File
@ -2,30 +2,40 @@
#include <cstdint> #include <cstdint>
namespace { namespace
{
#if defined(__GNUC__) #if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif #endif
std::uint64_t double_up(const std::uint64_t x) { return x * 2; } std::uint64_t double_up(const std::uint64_t x)
{
return x * 2;
} }
} // namespace
// Using DoNotOptimize on types like BitRef seem to cause a lot of problems // Using DoNotOptimize on types like BitRef seem to cause a lot of problems
// with the inline assembly on both GCC and Clang. // with the inline assembly on both GCC and Clang.
struct BitRef { struct BitRef
{
int index; int index;
unsigned char &byte; unsigned char &byte;
public: public:
static BitRef Make() { static BitRef Make()
{
static unsigned char arr[2] = {}; static unsigned char arr[2] = {};
BitRef b(1, arr[0]); BitRef b(1, arr[0]);
return b; return b;
} }
private:
BitRef(int i, unsigned char& b) : index(i), byte(b) {} private:
BitRef(int i, unsigned char &b) : index(i), byte(b)
{
}
}; };
int main(int, char*[]) { int main(int, char *[])
{
// this test verifies compilation of DoNotOptimize() for some types // this test verifies compilation of DoNotOptimize() for some types
char buffer8[8] = ""; char buffer8[8] = "";
View File
@ -10,24 +10,35 @@
#include <sstream> #include <sstream>
#include <string> #include <string>
namespace { namespace
{
class TestReporter : public benchmark::ConsoleReporter { class TestReporter : public benchmark::ConsoleReporter
{
public: public:
virtual bool ReportContext(const Context& context) { virtual bool ReportContext(const Context &context)
{
return ConsoleReporter::ReportContext(context); return ConsoleReporter::ReportContext(context);
}; };
virtual void ReportRuns(const std::vector<Run>& report) { virtual void ReportRuns(const std::vector<Run> &report)
{
++count_; ++count_;
ConsoleReporter::ReportRuns(report); ConsoleReporter::ReportRuns(report);
}; };
TestReporter() : count_(0) {} TestReporter() : count_(0)
{
}
virtual ~TestReporter() {} virtual ~TestReporter()
{
}
size_t GetCount() const { return count_; } size_t GetCount() const
{
return count_;
}
private: private:
mutable size_t count_; mutable size_t count_;
@ -35,67 +46,77 @@ class TestReporter : public benchmark::ConsoleReporter {
} // end namespace } // end namespace
static void NoPrefix(benchmark::State& state) { static void NoPrefix(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(NoPrefix); BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) { static void BM_Foo(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_Foo); BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) { static void BM_Bar(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_Bar); BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) { static void BM_FooBar(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_FooBar); BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) { static void BM_FooBa(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_FooBa); BENCHMARK(BM_FooBa);
int main(int argc, char **argv) { int main(int argc, char **argv)
{
bool list_only = false; bool list_only = false;
for (int i = 0; i < argc; ++i) for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") != list_only |= std::string(argv[i]).find("--benchmark_list_tests") != std::string::npos;
std::string::npos;
benchmark::Initialize(&argc, argv); benchmark::Initialize(&argc, argv);
TestReporter test_reporter; TestReporter test_reporter;
const size_t returned_count = const size_t returned_count = benchmark::RunSpecifiedBenchmarks(&test_reporter);
benchmark::RunSpecifiedBenchmarks(&test_reporter);
if (argc == 2) { if (argc == 2)
{
// Make sure we ran all of the tests // Make sure we ran all of the tests
std::stringstream ss(argv[1]); std::stringstream ss(argv[1]);
size_t expected_return; size_t expected_return;
ss >> expected_return; ss >> expected_return;
if (returned_count != expected_return) { if (returned_count != expected_return)
{
std::cerr << "ERROR: Expected " << expected_return std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = " << " tests to match the filter but returned_count = " << returned_count << std::endl;
<< returned_count << std::endl;
return -1; return -1;
} }
const size_t expected_reports = list_only ? 0 : expected_return; const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount(); const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports) { if (reports_count != expected_reports)
{
std::cerr << "ERROR: Expected " << expected_reports std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count << " tests to be run but reported_count = " << reports_count << std::endl;
<< std::endl;
return -1; return -1;
} }
} }
View File
@ -4,40 +4,53 @@
#include <cassert> #include <cassert>
#include <memory> #include <memory>
class MyFixture : public ::benchmark::Fixture { class MyFixture : public ::benchmark::Fixture
{
public: public:
void SetUp(const ::benchmark::State& state) { void SetUp(const ::benchmark::State &state)
if (state.thread_index == 0) { {
if (state.thread_index == 0)
{
assert(data.get() == nullptr); assert(data.get() == nullptr);
data.reset(new int(42)); data.reset(new int(42));
} }
} }
void TearDown(const ::benchmark::State& state) { void TearDown(const ::benchmark::State &state)
if (state.thread_index == 0) { {
if (state.thread_index == 0)
{
assert(data.get() != nullptr); assert(data.get() != nullptr);
data.reset(); data.reset();
} }
} }
~MyFixture() { assert(data == nullptr); } ~MyFixture()
{
assert(data == nullptr);
}
std::unique_ptr<int> data; std::unique_ptr<int> data;
}; };
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { BENCHMARK_F(MyFixture, Foo)(benchmark::State &st)
{
assert(data.get() != nullptr); assert(data.get() != nullptr);
assert(*data == 42); assert(*data == 42);
for (auto _ : st) { for (auto _ : st)
{
} }
} }
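BENCHMARK_F above defines and registers MyFixture::Foo in one step; the Bar benchmark that follows uses BENCHMARK_DEFINE_F, which only defines the body and relies on a matching BENCHMARK_REGISTER_F call (outside the quoted hunk) to register it. A minimal hypothetical sketch of that split, not taken from this commit:

class CounterFixture : public ::benchmark::Fixture
{
  public:
    int counter = 0;
};

BENCHMARK_DEFINE_F(CounterFixture, Increment)(benchmark::State &state)
{
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(++counter);
    }
}
// Registration happens separately, so run options can be attached here.
BENCHMARK_REGISTER_F(CounterFixture, Increment)->Iterations(1000);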
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State &st)
if (st.thread_index == 0) { {
if (st.thread_index == 0)
{
assert(data.get() != nullptr); assert(data.get() != nullptr);
assert(*data == 42); assert(*data == 42);
} }
for (auto _ : st) { for (auto _ : st)
{
assert(data.get() != nullptr); assert(data.get() != nullptr);
assert(*data == 42); assert(*data == 42);
} }
View File
@ -1,27 +1,26 @@
#undef NDEBUG #undef NDEBUG
#include <chrono>
#include <thread>
#include "../src/timers.h" #include "../src/timers.h"
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "output_test.h" #include "output_test.h"
#include <chrono>
#include <thread>
static const std::chrono::duration<double, std::milli> time_frame(50); static const std::chrono::duration<double, std::milli> time_frame(50);
static const double time_frame_in_sec( static const double time_frame_in_sec(
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>( std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(time_frame).count());
time_frame)
.count());
void MyBusySpinwait() { void MyBusySpinwait()
{
const auto start = benchmark::ChronoClockNow(); const auto start = benchmark::ChronoClockNow();
while (true) { while (true)
{
const auto now = benchmark::ChronoClockNow(); const auto now = benchmark::ChronoClockNow();
const auto elapsed = now - start; const auto elapsed = now - start;
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >= if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >= time_frame)
time_frame)
return; return;
} }
} }
@ -33,152 +32,92 @@ void MyBusySpinwait() {
// ========================================================================= // // ========================================================================= //
// BM_MainThread // BM_MainThread
void BM_MainThread(benchmark::State& state) { void BM_MainThread(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
MyBusySpinwait(); MyBusySpinwait();
state.SetIterationTime(time_frame_in_sec); state.SetIterationTime(time_frame_in_sec);
} }
state.counters["invtime"] = state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
benchmark::Counter{1, benchmark::Counter::kIsRate};
} }
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= // // ========================================================================= //
// BM_WorkerThread // BM_WorkerThread
void BM_WorkerThread(benchmark::State& state) { void BM_WorkerThread(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
std::thread Worker(&MyBusySpinwait); std::thread Worker(&MyBusySpinwait);
Worker.join(); Worker.join();
state.SetIterationTime(time_frame_in_sec); state.SetIterationTime(time_frame_in_sec);
} }
state.counters["invtime"] = state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
benchmark::Counter{1, benchmark::Counter::kIsRate};
} }
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
->Iterations(1) BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= // // ========================================================================= //
// BM_MainThreadAndWorkerThread // BM_MainThreadAndWorkerThread
void BM_MainThreadAndWorkerThread(benchmark::State& state) { void BM_MainThreadAndWorkerThread(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
std::thread Worker(&MyBusySpinwait); std::thread Worker(&MyBusySpinwait);
MyBusySpinwait(); MyBusySpinwait();
Worker.join(); Worker.join();
state.SetIterationTime(time_frame_in_sec); state.SetIterationTime(time_frame_in_sec);
} }
state.counters["invtime"] = state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
benchmark::Counter{1, benchmark::Counter::kIsRate};
} }
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1); BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThreadAndWorkerThread) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
->Iterations(1) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
->Threads(1) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
->UseRealTime(); BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
->Iterations(1)
->Threads(1)
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2); BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThreadAndWorkerThread) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
->Iterations(1) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
->Threads(2) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
->UseRealTime(); BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread) BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
->Iterations(1)
->Threads(2)
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
// ========================================================================= // // ========================================================================= //
// ---------------------------- TEST CASES END ----------------------------- // // ---------------------------- TEST CASES END ----------------------------- //
// ========================================================================= // // ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}
View File
@ -1,7 +1,9 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
void BM_empty(benchmark::State& state) { void BM_empty(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
} }
View File
@ -3,11 +3,14 @@
#include <cstdlib> #include <cstdlib>
#include <map> #include <map>
namespace { namespace
{
std::map<int, int> ConstructRandomMap(int size) { std::map<int, int> ConstructRandomMap(int size)
{
std::map<int, int> m; std::map<int, int> m;
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i)
{
m.insert(std::make_pair(std::rand() % size, std::rand() % size)); m.insert(std::make_pair(std::rand() % size, std::rand() % size));
} }
return m; return m;
@ -16,14 +19,17 @@ std::map<int, int> ConstructRandomMap(int size) {
} // namespace } // namespace
// Basic version. // Basic version.
static void BM_MapLookup(benchmark::State& state) { static void BM_MapLookup(benchmark::State &state)
{
const int size = static_cast<int>(state.range(0)); const int size = static_cast<int>(state.range(0));
std::map<int, int> m; std::map<int, int> m;
for (auto _ : state) { for (auto _ : state)
{
state.PauseTiming(); state.PauseTiming();
m = ConstructRandomMap(size); m = ConstructRandomMap(size);
state.ResumeTiming(); state.ResumeTiming();
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i)
{
benchmark::DoNotOptimize(m.find(std::rand() % size)); benchmark::DoNotOptimize(m.find(std::rand() % size));
} }
} }
@ -32,21 +38,29 @@ static void BM_MapLookup(benchmark::State& state) {
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
// Using fixtures. // Using fixtures.
class MapFixture : public ::benchmark::Fixture { class MapFixture : public ::benchmark::Fixture
{
public: public:
void SetUp(const ::benchmark::State& st) { void SetUp(const ::benchmark::State &st)
{
m = ConstructRandomMap(static_cast<int>(st.range(0))); m = ConstructRandomMap(static_cast<int>(st.range(0)));
} }
void TearDown(const ::benchmark::State&) { m.clear(); } void TearDown(const ::benchmark::State &)
{
m.clear();
}
std::map<int, int> m; std::map<int, int> m;
}; };
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State &state)
{
const int size = static_cast<int>(state.range(0)); const int size = static_cast<int>(state.range(0));
for (auto _ : state) { for (auto _ : state)
for (int i = 0; i < size; ++i) { {
for (int i = 0; i < size; ++i)
{
benchmark::DoNotOptimize(m.find(std::rand() % size)); benchmark::DoNotOptimize(m.find(std::rand() % size));
} }
} }
View File
@ -4,16 +4,22 @@
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
#include "output_test.h" #include "output_test.h"
class TestMemoryManager : public benchmark::MemoryManager { class TestMemoryManager : public benchmark::MemoryManager
void Start() {} {
void Stop(Result* result) { void Start()
{
}
void Stop(Result *result)
{
result->num_allocs = 42; result->num_allocs = 42;
result->max_bytes_used = 42000; result->max_bytes_used = 42000;
} }
}; };
void BM_empty(benchmark::State& state) { void BM_empty(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
} }
@ -35,7 +41,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"}", MR_Next}}); {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
{
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager()); std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
benchmark::RegisterMemoryManager(mm.get()); benchmark::RegisterMemoryManager(mm.get());
View File
@ -5,7 +5,8 @@
#include <set> #include <set>
#include <vector> #include <vector>
class MultipleRangesFixture : public ::benchmark::Fixture { class MultipleRangesFixture : public ::benchmark::Fixture
{
public: public:
MultipleRangesFixture() MultipleRangesFixture()
: expectedValues({{1, 3, 5}, : expectedValues({{1, 3, 5},
@ -26,11 +27,13 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
{2, 7, 5}, {2, 7, 5},
{2, 7, 8}, {2, 7, 8},
{2, 7, 15}, {2, 7, 15},
{7, 6, 3}}) {} {7, 6, 3}})
{
}
void SetUp(const ::benchmark::State& state) { void SetUp(const ::benchmark::State &state)
std::vector<int64_t> ranges = {state.range(0), state.range(1), {
state.range(2)}; std::vector<int64_t> ranges = {state.range(0), state.range(1), state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end()); assert(expectedValues.find(ranges) != expectedValues.end());
@ -39,20 +42,26 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
// NOTE: This is not TearDown as we want to check after _all_ runs are // NOTE: This is not TearDown as we want to check after _all_ runs are
// complete. // complete.
virtual ~MultipleRangesFixture() { virtual ~MultipleRangesFixture()
if (actualValues != expectedValues) { {
if (actualValues != expectedValues)
{
std::cout << "EXPECTED\n"; std::cout << "EXPECTED\n";
for (auto v : expectedValues) { for (auto v : expectedValues)
{
std::cout << "{"; std::cout << "{";
for (int64_t iv : v) { for (int64_t iv : v)
{
std::cout << iv << ", "; std::cout << iv << ", ";
} }
std::cout << "}\n"; std::cout << "}\n";
} }
std::cout << "ACTUAL\n"; std::cout << "ACTUAL\n";
for (auto v : actualValues) { for (auto v : actualValues)
{
std::cout << "{"; std::cout << "{";
for (int64_t iv : v) { for (int64_t iv : v)
{
std::cout << iv << ", "; std::cout << iv << ", ";
} }
std::cout << "}\n"; std::cout << "}\n";
@ -64,10 +73,13 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
std::set<std::vector<int64_t>> actualValues; std::set<std::vector<int64_t>> actualValues;
}; };
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
int64_t product = state.range(0) * state.range(1) * state.range(2); int64_t product = state.range(0) * state.range(1) * state.range(2);
for (int64_t x = 0; x < product; x++) { for (int64_t x = 0; x < product; x++)
{
benchmark::DoNotOptimize(x); benchmark::DoNotOptimize(x);
} }
} }
@ -78,17 +90,21 @@ BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
->Ranges({{1, 2}, {3, 7}, {5, 15}}) ->Ranges({{1, 2}, {3, 7}, {5, 15}})
->Args({7, 6, 3}); ->Args({7, 6, 3});
void BM_CheckDefaultArgument(benchmark::State& state) { void BM_CheckDefaultArgument(benchmark::State &state)
{
// Test that the 'range()' without an argument is the same as 'range(0)'. // Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0)); assert(state.range() == state.range(0));
assert(state.range() != state.range(1)); assert(state.range() != state.range(1));
for (auto _ : state) { for (auto _ : state)
{
} }
} }
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}}); BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) { static void BM_MultipleRanges(benchmark::State &st)
for (auto _ : st) { {
for (auto _ : st)
{
} }
} }
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}}); BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});
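Ranges() takes one {lo, hi} pair per argument and benchmarks the cross product of the values each pair generates, which is why the fixture above expects combinations such as {1, 3, 5} through {2, 7, 15}. A smaller hypothetical sketch, not part of this commit:

static void BM_TwoDims(benchmark::State &state)
{
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(state.range(0) * state.range(1));
    }
}
// With the default multiplier this expands to the four argument pairs
// (8, 64), (8, 128), (16, 64) and (16, 128).
BENCHMARK(BM_TwoDims)->Ranges({{8, 16}, {64, 128}});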
View File
@ -7,16 +7,19 @@
#endif #endif
#include <cassert> #include <cassert>
void BM_basic(benchmark::State& state) { void BM_basic(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
void BM_basic_slow(benchmark::State& state) { void BM_basic_slow(benchmark::State &state)
{
std::chrono::milliseconds sleep_duration(state.range(0)); std::chrono::milliseconds sleep_duration(state.range(0));
for (auto _ : state) { for (auto _ : state)
std::this_thread::sleep_for( {
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration)); std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
} }
} }
@ -37,8 +40,7 @@ BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3); BENCHMARK(BM_basic)->Repetitions(3);
BENCHMARK(BM_basic) BENCHMARK(BM_basic)
->RangeMultiplier(std::numeric_limits<int>::max()) ->RangeMultiplier(std::numeric_limits<int>::max())
->Range(std::numeric_limits<int64_t>::min(), ->Range(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max());
std::numeric_limits<int64_t>::max());
// Negative ranges // Negative ranges
BENCHMARK(BM_basic)->Range(-64, -1); BENCHMARK(BM_basic)->Range(-64, -1);
@ -46,15 +48,18 @@ BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
BENCHMARK(BM_basic)->DenseRange(-2, 2, 1); BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}}); BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
void CustomArgs(benchmark::internal::Benchmark* b) { void CustomArgs(benchmark::internal::Benchmark *b)
for (int i = 0; i < 10; ++i) { {
for (int i = 0; i < 10; ++i)
{
b->Arg(i); b->Arg(i);
} }
} }
BENCHMARK(BM_basic)->Apply(CustomArgs); BENCHMARK(BM_basic)->Apply(CustomArgs);
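Apply() hands the registration object to a user-supplied function, which is convenient for argument shapes the fluent builder cannot express directly, for example paired values. A hypothetical variant of CustomArgs, not part of this commit:

void PairedArgs(benchmark::internal::Benchmark *b)
{
    for (int i = 1; i <= 4; ++i)
    {
        b->Args({i, i * i}); // each call adds one (i, i*i) argument tuple
    }
}
BENCHMARK(BM_basic)->Apply(PairedArgs);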
void BM_explicit_iteration_count(benchmark::State& state) { void BM_explicit_iteration_count(benchmark::State &state)
{
// Test that benchmarks specified with an explicit iteration count are // Test that benchmarks specified with an explicit iteration count are
// only run once. // only run once.
static bool invoked_before = false; static bool invoked_before = false;
@ -68,7 +73,6 @@ void BM_explicit_iteration_count(benchmark::State& state) {
++actual_iterations; ++actual_iterations;
assert(state.iterations() == state.max_iterations); assert(state.iterations() == state.max_iterations);
assert(state.iterations() == 42); assert(state.iterations() == 42);
} }
BENCHMARK(BM_explicit_iteration_count)->Iterations(42); BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
View File
@ -18,17 +18,18 @@
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__) #define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) \ #define SET_SUBSTITUTIONS(...) int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
enum MatchRules { enum MatchRules
{
MR_Default, // Skip non-matching lines until a match is found. MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line. MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches MR_Not // No line between the current position and the next match matches
// the regex // the regex
}; };
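In the test sources these rules are consumed through the ADD_CASES macro below; a hypothetical illustration, where %console_report and %csv_report are assumed to be among the substitutions defined in output_test_helper.cc:

ADD_CASES(TC_ConsoleOut, {{"^BM_Foo %console_report$"},
                          {"^BM_Foo/1 %console_report$", MR_Next}}); // must appear on the very next line
ADD_CASES(TC_CSVOut, {{"^\"BM_Foo\",%csv_report$"}});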
struct TestCase { struct TestCase
{
TestCase(std::string re, int rule = MR_Default); TestCase(std::string re, int rule = MR_Default);
std::string regex_str; std::string regex_str;
@ -37,7 +38,8 @@ struct TestCase {
std::shared_ptr<benchmark::Regex> regex; std::shared_ptr<benchmark::Regex> regex;
}; };
enum TestCaseID { enum TestCaseID
{
TC_ConsoleOut, TC_ConsoleOut,
TC_ConsoleErr, TC_ConsoleErr,
TC_JSONOut, TC_JSONOut,
@ -54,18 +56,17 @@ int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regex's // Add or set a list of substitutions to be performed on constructed regex's
// See 'output_test_helper.cc' for a list of default substitutions. // See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions( int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il);
std::initializer_list<std::pair<std::string, std::string>> il);
// Run all output tests. // Run all output tests.
void RunOutputTests(int argc, char* argv[]); void RunOutputTests(int argc, char *argv[]);
// Count the number of 'pat' substrings in the 'haystack' string. // Count the number of 'pat' substrings in the 'haystack' string.
int SubstrCnt(const std::string& haystack, const std::string& pat); int SubstrCnt(const std::string &haystack, const std::string &pat);
// Run registered benchmarks with file reporter enabled, and return the content // Run registered benchmarks with file reporter enabled, and return the content
// outputted by the file reporter. // outputted by the file reporter.
std::string GetFileReporterOutput(int argc, char* argv[]); std::string GetFileReporterOutput(int argc, char *argv[]);
// ========================================================================= // // ========================================================================= //
// ------------------------- Results checking ------------------------------ // // ------------------------- Results checking ------------------------------ //
@ -83,25 +84,32 @@ std::string GetFileReporterOutput(int argc, char* argv[]);
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
struct Results; struct Results;
typedef std::function<void(Results const&)> ResultsCheckFn; typedef std::function<void(Results const &)> ResultsCheckFn;
size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); size_t AddChecker(const char *bm_name_pattern, ResultsCheckFn fn);
// Class holding the results of a benchmark. // Class holding the results of a benchmark.
// It is passed in calls to checker functions. // It is passed in calls to checker functions.
struct Results { struct Results
{
// the benchmark name // the benchmark name
std::string name; std::string name;
// the benchmark fields // the benchmark fields
std::map<std::string, std::string> values; std::map<std::string, std::string> values;
Results(const std::string& n) : name(n) {} Results(const std::string &n) : name(n)
{
}
int NumThreads() const; int NumThreads() const;
double NumIterations() const; double NumIterations() const;
typedef enum { kCpuTime, kRealTime } BenchmarkTime; typedef enum
{
kCpuTime,
kRealTime
} BenchmarkTime;
// get cpu_time or real_time in seconds // get cpu_time or real_time in seconds
double GetTime(BenchmarkTime which) const; double GetTime(BenchmarkTime which) const;
@ -109,40 +117,43 @@ struct Results {
// get the real_time duration of the benchmark in seconds. // get the real_time duration of the benchmark in seconds.
// it is better to use fuzzy float checks for this, as the float // it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy. // ASCII formatting is lossy.
double DurationRealTime() const { double DurationRealTime() const
{
return NumIterations() * GetTime(kRealTime); return NumIterations() * GetTime(kRealTime);
} }
// get the cpu_time duration of the benchmark in seconds // get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const { double DurationCPUTime() const
{
return NumIterations() * GetTime(kCpuTime); return NumIterations() * GetTime(kCpuTime);
} }
// get the string for a result by name, or nullptr if the name // get the string for a result by name, or nullptr if the name
// is not found // is not found
const std::string* Get(const char* entry_name) const { const std::string *Get(const char *entry_name) const
{
auto it = values.find(entry_name); auto it = values.find(entry_name);
if (it == values.end()) return nullptr; if (it == values.end())
return nullptr;
return &it->second; return &it->second;
} }
// get a result by name, parsed as a specific type. // get a result by name, parsed as a specific type.
// NOTE: for counters, use GetCounterAs instead. // NOTE: for counters, use GetCounterAs instead.
template <class T> template <class T> T GetAs(const char *entry_name) const;
T GetAs(const char* entry_name) const;
// counters are written as doubles, so they have to be read first // counters are written as doubles, so they have to be read first
// as a double, and only then converted to the asked type. // as a double, and only then converted to the asked type.
template <class T> template <class T> T GetCounterAs(const char *entry_name) const
T GetCounterAs(const char* entry_name) const { {
double dval = GetAs<double>(entry_name); double dval = GetAs<double>(entry_name);
T tval = static_cast<T>(dval); T tval = static_cast<T>(dval);
return tval; return tval;
} }
}; };
template <class T> template <class T> T Results::GetAs(const char *entry_name) const
T Results::GetAs(const char* entry_name) const { {
auto* sv = Get(entry_name); auto *sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty()); CHECK(sv != nullptr && !sv->empty());
std::stringstream ss; std::stringstream ss;
ss << *sv; ss << *sv;
@ -204,9 +215,10 @@ T Results::GetAs(const char* entry_name) const {
// --------------------------- Misc Utilities ------------------------------ // // --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= // // ========================================================================= //
namespace { namespace
{
const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; const char *const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
} // end namespace } // end namespace
View File
@ -16,8 +16,10 @@
// ========================================================================= // // ========================================================================= //
// ------------------------------ Internals -------------------------------- // // ------------------------------ Internals -------------------------------- //
// ========================================================================= // // ========================================================================= //
namespace internal { namespace internal
namespace { {
namespace
{
using TestCaseList = std::vector<TestCase>; using TestCaseList = std::vector<TestCase>;
@ -28,14 +30,16 @@ using TestCaseList = std::vector<TestCase>;
// Substitute("%HelloWorld") // Always expands to Hello. // Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>; using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList& GetTestCaseList(TestCaseID ID) { TestCaseList &GetTestCaseList(TestCaseID ID)
{
// Uses function-local statics to ensure initialization occurs // Uses function-local statics to ensure initialization occurs
// before first use. // before first use.
static TestCaseList lists[TC_NumID]; static TestCaseList lists[TC_NumID];
return lists[ID]; return lists[ID];
} }
SubMap& GetSubstitutions() { SubMap &GetSubstitutions()
{
// Don't use 'dec_re' from header because it may not yet be initialized. // Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off // clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
@ -69,13 +73,16 @@ SubMap& GetSubstitutions() {
return map; return map;
} }
std::string PerformSubstitutions(std::string source) { std::string PerformSubstitutions(std::string source)
SubMap const& subs = GetSubstitutions(); {
SubMap const &subs = GetSubstitutions();
using SizeT = std::string::size_type; using SizeT = std::string::size_type;
for (auto const& KV : subs) { for (auto const &KV : subs)
{
SizeT pos; SizeT pos;
SizeT next_start = 0; SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos) { while ((pos = source.find(KV.first, next_start)) != std::string::npos)
{
next_start = pos + KV.second.size(); next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second); source.replace(pos, KV.first.size(), KV.second);
} }
@ -83,44 +90,47 @@ std::string PerformSubstitutions(std::string source) {
return source; return source;
} }
void CheckCase(std::stringstream& remaining_output, TestCase const& TC, void CheckCase(std::stringstream &remaining_output, TestCase const &TC, TestCaseList const &not_checks)
TestCaseList const& not_checks) { {
std::string first_line; std::string first_line;
bool on_first = true; bool on_first = true;
std::string line; std::string line;
while (remaining_output.eof() == false) { while (remaining_output.eof() == false)
{
CHECK(remaining_output.good()); CHECK(remaining_output.good());
std::getline(remaining_output, line); std::getline(remaining_output, line);
if (on_first) { if (on_first)
{
first_line = line; first_line = line;
on_first = false; on_first = false;
} }
for (const auto& NC : not_checks) { for (const auto &NC : not_checks)
{
CHECK(!NC.regex->Match(line)) CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << NC.regex_str << "\""
<< NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line; << "\n started matching near: " << first_line;
} }
if (TC.regex->Match(line)) return; if (TC.regex->Match(line))
CHECK(TC.match_rule != MR_Next) return;
<< "Expected line \"" << line << "\" to match regex \"" << TC.regex_str CHECK(TC.match_rule != MR_Next) << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str << "\""
<< "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line; << "\n started matching near: " << first_line;
} }
CHECK(remaining_output.eof() == false) CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str << "End of output reached before match for regex \"" << TC.regex_str << "\" was found"
<< "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line; << "\n started matching near: " << first_line;
} }
void CheckCases(TestCaseList const& checks, std::stringstream& output) { void CheckCases(TestCaseList const &checks, std::stringstream &output)
{
std::vector<TestCase> not_checks; std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i) { for (size_t i = 0; i < checks.size(); ++i)
const auto& TC = checks[i]; {
if (TC.match_rule == MR_Not) { const auto &TC = checks[i];
if (TC.match_rule == MR_Not)
{
not_checks.push_back(TC); not_checks.push_back(TC);
continue; continue;
} }
@ -129,18 +139,21 @@ void CheckCases(TestCaseList const& checks, std::stringstream& output) {
} }
} }
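Reading CheckCase and CheckCases together gives the meaning of the three match rules: an MR_Default case may match anywhere later in the output, an MR_Next case must match the very next line after the previous match, and an MR_Not case is collected into not_checks and must not match any line scanned while searching for the following positive cases. A small illustrative registration in the same style as the ADD_CASES calls further down in this commit (output_test.h is the assumed harness header that declares ADD_CASES, the TC_* streams, and the MR_* constants; the patterns themselves are made up):

#include "output_test.h" // assumed test-harness header (ADD_CASES, TC_*, MR_*)

// Expect an opening JSON brace somewhere, immediately followed by the
// "context" key, and require that no error field shows up while searching.
ADD_CASES(TC_JSONOut, {{"^\\{", MR_Default},
                       {"\"context\":", MR_Next},
                       {"\"error_occurred\": true", MR_Not}});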
class TestReporter : public benchmark::BenchmarkReporter { class TestReporter : public benchmark::BenchmarkReporter
{
public: public:
TestReporter(std::vector<benchmark::BenchmarkReporter*> reps) TestReporter(std::vector<benchmark::BenchmarkReporter *> reps) : reporters_(reps)
: reporters_(reps) {} {
}
virtual bool ReportContext(const Context& context) { virtual bool ReportContext(const Context &context)
{
bool last_ret = false; bool last_ret = false;
bool first = true; bool first = true;
for (auto rep : reporters_) { for (auto rep : reporters_)
{
bool new_ret = rep->ReportContext(context); bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret) CHECK(first || new_ret == last_ret) << "Reports return different values for ReportContext";
<< "Reports return different values for ReportContext";
first = false; first = false;
last_ret = new_ret; last_ret = new_ret;
} }
@ -148,15 +161,19 @@ class TestReporter : public benchmark::BenchmarkReporter {
return last_ret; return last_ret;
} }
void ReportRuns(const std::vector<Run>& report) { void ReportRuns(const std::vector<Run> &report)
for (auto rep : reporters_) rep->ReportRuns(report); {
for (auto rep : reporters_)
rep->ReportRuns(report);
} }
void Finalize() { void Finalize()
for (auto rep : reporters_) rep->Finalize(); {
for (auto rep : reporters_)
rep->Finalize();
} }
private: private:
std::vector<benchmark::BenchmarkReporter*> reporters_; std::vector<benchmark::BenchmarkReporter *> reporters_;
}; };
} // namespace } // namespace
@ -166,15 +183,19 @@ class TestReporter : public benchmark::BenchmarkReporter {
// -------------------------- Results checking ----------------------------- // // -------------------------- Results checking ----------------------------- //
// ========================================================================= // // ========================================================================= //
namespace internal { namespace internal
{
// Utility class to manage subscribers for checking benchmark results. // Utility class to manage subscribers for checking benchmark results.
// It works by parsing the CSV output to read the results. // It works by parsing the CSV output to read the results.
class ResultsChecker { class ResultsChecker
{
public: public:
struct PatternAndFn : public TestCase { // reusing TestCase for its regexes struct PatternAndFn : public TestCase
PatternAndFn(const std::string& rx, ResultsCheckFn fn_) { // reusing TestCase for its regexes
: TestCase(rx), fn(fn_) {} PatternAndFn(const std::string &rx, ResultsCheckFn fn_) : TestCase(rx), fn(fn_)
{
}
ResultsCheckFn fn; ResultsCheckFn fn;
}; };
@ -182,48 +203,54 @@ class ResultsChecker {
std::vector<Results> results; std::vector<Results> results;
std::vector<std::string> field_names; std::vector<std::string> field_names;
void Add(const std::string& entry_pattern, ResultsCheckFn fn); void Add(const std::string &entry_pattern, ResultsCheckFn fn);
void CheckResults(std::stringstream& output); void CheckResults(std::stringstream &output);
private: private:
void SetHeader_(const std::string& csv_header); void SetHeader_(const std::string &csv_header);
void SetValues_(const std::string& entry_csv_line); void SetValues_(const std::string &entry_csv_line);
std::vector<std::string> SplitCsv_(const std::string& line); std::vector<std::string> SplitCsv_(const std::string &line);
}; };
// store the static ResultsChecker in a function to prevent initialization // store the static ResultsChecker in a function to prevent initialization
// order problems // order problems
ResultsChecker& GetResultsChecker() { ResultsChecker &GetResultsChecker()
{
static ResultsChecker rc; static ResultsChecker rc;
return rc; return rc;
} }
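The function-local static here is the construct-on-first-use idiom: the checker is built the first time GetResultsChecker() is called, which necessarily happens before any AddChecker() registration that needs it, independent of the link order of translation units. A standalone illustration of the pattern:

#include <iostream>
#include <vector>

struct Registry
{
    std::vector<int> entries;
};

// Construct-on-first-use: the registry is created on the first call,
// not at some unspecified point during static initialization.
Registry &GetRegistry()
{
    static Registry r;
    return r;
}

// A file-scope registration that may run before any other global is
// initialized; it is still safe because GetRegistry() builds on demand.
static const bool registered = (GetRegistry().entries.push_back(42), true);

int main()
{
    std::cout << GetRegistry().entries.size() << "\n"; // prints 1
    return 0;
}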
// add a results checker for a benchmark // add a results checker for a benchmark
void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { void ResultsChecker::Add(const std::string &entry_pattern, ResultsCheckFn fn)
{
check_patterns.emplace_back(entry_pattern, fn); check_patterns.emplace_back(entry_pattern, fn);
} }
// check the results of all subscribed benchmarks // check the results of all subscribed benchmarks
void ResultsChecker::CheckResults(std::stringstream& output) { void ResultsChecker::CheckResults(std::stringstream &output)
{
// first reset the stream to the start // first reset the stream to the start
{ {
auto start = std::stringstream::pos_type(0); auto start = std::stringstream::pos_type(0);
// clear before calling tellg() // clear before calling tellg()
output.clear(); output.clear();
// seek to zero only when needed // seek to zero only when needed
if (output.tellg() > start) output.seekg(start); if (output.tellg() > start)
output.seekg(start);
// and just in case // and just in case
output.clear(); output.clear();
} }
// now go over every line and publish it to the ResultsChecker // now go over every line and publish it to the ResultsChecker
std::string line; std::string line;
bool on_first = true; bool on_first = true;
while (output.eof() == false) { while (output.eof() == false)
{
CHECK(output.good()); CHECK(output.good());
std::getline(output, line); std::getline(output, line);
if (on_first) { if (on_first)
{
SetHeader_(line); // this is important SetHeader_(line); // this is important
on_first = false; on_first = false;
continue; continue;
@ -231,14 +258,19 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
SetValues_(line); SetValues_(line);
} }
// finally we can call the subscribed check functions // finally we can call the subscribed check functions
for (const auto& p : check_patterns) { for (const auto &p : check_patterns)
{
VLOG(2) << "--------------------------------\n"; VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for (const auto& r : results) { for (const auto &r : results)
if (!p.regex->Match(r.name)) { {
if (!p.regex->Match(r.name))
{
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue; continue;
} else { }
else
{
VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
} }
VLOG(1) << "Checking results of " << r.name << ": ... \n"; VLOG(1) << "Checking results of " << r.name << ": ... \n";
@ -249,56 +281,71 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
} }
// prepare for the names in this header // prepare for the names in this header
void ResultsChecker::SetHeader_(const std::string& csv_header) { void ResultsChecker::SetHeader_(const std::string &csv_header)
{
field_names = SplitCsv_(csv_header); field_names = SplitCsv_(csv_header);
} }
// set the values for a benchmark // set the values for a benchmark
void ResultsChecker::SetValues_(const std::string& entry_csv_line) { void ResultsChecker::SetValues_(const std::string &entry_csv_line)
if (entry_csv_line.empty()) return; // some lines are empty {
if (entry_csv_line.empty())
return; // some lines are empty
CHECK(!field_names.empty()); CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line); auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size()); CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto& entry = results.back(); auto &entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i) { for (size_t i = 1, e = vals.size(); i < e; ++i)
{
entry.values[field_names[i]] = vals[i]; entry.values[field_names[i]] = vals[i];
} }
} }
// a quick'n'dirty csv splitter (eliminating quotes) // a quick'n'dirty csv splitter (eliminating quotes)
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) { std::vector<std::string> ResultsChecker::SplitCsv_(const std::string &line)
{
std::vector<std::string> out; std::vector<std::string> out;
if (line.empty()) return out; if (line.empty())
if (!field_names.empty()) out.reserve(field_names.size()); return out;
if (!field_names.empty())
out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos; size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while (pos != line.npos) { while (pos != line.npos)
{
CHECK(curr > 0); CHECK(curr > 0);
if (line[prev] == '"') ++prev; if (line[prev] == '"')
if (line[curr - 1] == '"') --curr; ++prev;
if (line[curr - 1] == '"')
--curr;
out.push_back(line.substr(prev, curr - prev)); out.push_back(line.substr(prev, curr - prev));
prev = pos + 1; prev = pos + 1;
pos = line.find_first_of(',', pos + 1); pos = line.find_first_of(',', pos + 1);
curr = pos; curr = pos;
} }
curr = line.size(); curr = line.size();
if (line[prev] == '"') ++prev; if (line[prev] == '"')
if (line[curr - 1] == '"') --curr; ++prev;
if (line[curr - 1] == '"')
--curr;
out.push_back(line.substr(prev, curr - prev)); out.push_back(line.substr(prev, curr - prev));
return out; return out;
} }
} // end namespace internal } // end namespace internal
size_t AddChecker(const char* bm_name, ResultsCheckFn fn) { size_t AddChecker(const char *bm_name, ResultsCheckFn fn)
auto& rc = internal::GetResultsChecker(); {
auto &rc = internal::GetResultsChecker();
rc.Add(bm_name, fn); rc.Add(bm_name, fn);
return rc.results.size(); return rc.results.size();
} }
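A sketch of how a test subscribes a checker, assuming (as the harness header appears to define) that ResultsCheckFn is a callable taking Results const&; the benchmark name and the specific assertions are made up for illustration:

#include "output_test.h" // assumed harness header: AddChecker, Results, CHECK, kRealTime/kCpuTime

// Sanity-check the parsed CSV row of a hypothetical benchmark.
static size_t results_checker_dummy = AddChecker("BM_basic", [](Results const &r) {
    CHECK(r.NumIterations() > 0);       // at least one iteration was run
    CHECK(r.GetTime(kRealTime) >= 0.0); // real time, converted to seconds
    CHECK(r.GetTime(kCpuTime) >= 0.0);  // cpu time, converted to seconds
});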
int Results::NumThreads() const { int Results::NumThreads() const
{
auto pos = name.find("/threads:"); auto pos = name.find("/threads:");
if (pos == name.npos) return 1; if (pos == name.npos)
return 1;
auto end = name.find('/', pos + 9); auto end = name.find('/', pos + 9);
std::stringstream ss; std::stringstream ss;
ss << name.substr(pos + 9, end); ss << name.substr(pos + 9, end);
@ -308,25 +355,36 @@ int Results::NumThreads() const {
return num; return num;
} }
double Results::NumIterations() const { double Results::NumIterations() const
{
return GetAs<double>("iterations"); return GetAs<double>("iterations");
} }
double Results::GetTime(BenchmarkTime which) const { double Results::GetTime(BenchmarkTime which) const
{
CHECK(which == kCpuTime || which == kRealTime); CHECK(which == kCpuTime || which == kRealTime);
const char* which_str = which == kCpuTime ? "cpu_time" : "real_time"; const char *which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str); double val = GetAs<double>(which_str);
auto unit = Get("time_unit"); auto unit = Get("time_unit");
CHECK(unit); CHECK(unit);
if (*unit == "ns") { if (*unit == "ns")
{
return val * 1.e-9; return val * 1.e-9;
} else if (*unit == "us") { }
else if (*unit == "us")
{
return val * 1.e-6; return val * 1.e-6;
} else if (*unit == "ms") { }
else if (*unit == "ms")
{
return val * 1.e-3; return val * 1.e-3;
} else if (*unit == "s") { }
else if (*unit == "s")
{
return val; return val;
} else { }
else
{
CHECK(1 == 0) << "unknown time unit: " << *unit; CHECK(1 == 0) << "unknown time unit: " << *unit;
return 0; return 0;
} }
@ -337,38 +395,41 @@ double Results::GetTime(BenchmarkTime which) const {
// ========================================================================= // // ========================================================================= //
TestCase::TestCase(std::string re, int rule) TestCase::TestCase(std::string re, int rule)
: regex_str(std::move(re)), : regex_str(std::move(re)), match_rule(rule), substituted_regex(internal::PerformSubstitutions(regex_str)),
match_rule(rule), regex(std::make_shared<benchmark::Regex>())
substituted_regex(internal::PerformSubstitutions(regex_str)), {
regex(std::make_shared<benchmark::Regex>()) {
std::string err_str; std::string err_str;
regex->Init(substituted_regex, &err_str); regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex << "\""
<< "\""
<< "\n originally \"" << regex_str << "\"" << "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str; << "\n got error: " << err_str;
} }
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) { int AddCases(TestCaseID ID, std::initializer_list<TestCase> il)
auto& L = internal::GetTestCaseList(ID); {
auto &L = internal::GetTestCaseList(ID);
L.insert(L.end(), il); L.insert(L.end(), il);
return 0; return 0;
} }
int SetSubstitutions( int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il)
std::initializer_list<std::pair<std::string, std::string>> il) { {
auto& subs = internal::GetSubstitutions(); auto &subs = internal::GetSubstitutions();
for (auto KV : il) { for (auto KV : il)
{
bool exists = false; bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second); KV.second = internal::PerformSubstitutions(KV.second);
for (auto& EKV : subs) { for (auto &EKV : subs)
if (EKV.first == KV.first) { {
if (EKV.first == KV.first)
{
EKV.second = std::move(KV.second); EKV.second = std::move(KV.second);
exists = true; exists = true;
break; break;
} }
} }
if (!exists) subs.push_back(std::move(KV)); if (!exists)
subs.push_back(std::move(KV));
} }
return 0; return 0;
} }
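SetSubstitutions is what lets an individual test add its own placeholders on top of the defaults, and because each value is first run through PerformSubstitutions, new placeholders can be built from existing ones such as %float (the complexity test later in this commit does exactly that with %bigOStr and %RMS). An illustrative use, with a made-up placeholder name:

#include "output_test.h" // assumed harness header: SET_SUBSTITUTIONS, ADD_CASES

// Define a nanosecond-time column placeholder in terms of the predefined
// %float one, then use it in an expected console line.
SET_SUBSTITUTIONS({{"%ns_time", "%float ns"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %ns_time", MR_Default}});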
@ -379,35 +440,34 @@ int SetSubstitutions(
#pragma GCC diagnostic push #pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations" #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif #endif
void RunOutputTests(int argc, char* argv[]) { void RunOutputTests(int argc, char *argv[])
{
using internal::GetTestCaseList; using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv); benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true); auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
benchmark::ConsoleReporter CR(options); benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR; benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR; benchmark::CSVReporter CSVR;
struct ReporterTest { struct ReporterTest
const char* name; {
std::vector<TestCase>& output_cases; const char *name;
std::vector<TestCase>& error_cases; std::vector<TestCase> &output_cases;
benchmark::BenchmarkReporter& reporter; std::vector<TestCase> &error_cases;
benchmark::BenchmarkReporter &reporter;
std::stringstream out_stream; std::stringstream out_stream;
std::stringstream err_stream; std::stringstream err_stream;
ReporterTest(const char* n, std::vector<TestCase>& out_tc, ReporterTest(const char *n, std::vector<TestCase> &out_tc, std::vector<TestCase> &err_tc,
std::vector<TestCase>& err_tc, benchmark::BenchmarkReporter &br)
benchmark::BenchmarkReporter& br) : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) { {
reporter.SetOutputStream(&out_stream); reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream); reporter.SetErrorStream(&err_stream);
} }
} TestCases[] = { } TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), GetTestCaseList(TC_ConsoleErr), CR},
GetTestCaseList(TC_ConsoleErr), CR}, {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), JR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), CSVR},
JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
CSVR},
}; };
// Create the test reporter and run the benchmarks. // Create the test reporter and run the benchmarks.
@ -415,7 +475,8 @@ void RunOutputTests(int argc, char* argv[]) {
internal::TestReporter test_rep({&CR, &JR, &CSVR}); internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep); benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto& rep_test : TestCases) { for (auto &rep_test : TestCases)
{
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-'); std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n"; std::cout << banner << msg << banner << "\n";
@ -431,7 +492,7 @@ void RunOutputTests(int argc, char* argv[]) {
// now that we know the output is as expected, we can dispatch // now that we know the output is as expected, we can dispatch
// the checks to subscribers. // the checks to subscribers.
auto& csv = TestCases[2]; auto &csv = TestCases[2];
// would use == but gcc spits a warning // would use == but gcc spits a warning
CHECK(std::strcmp(csv.name, "CSVReporter") == 0); CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream); internal::GetResultsChecker().CheckResults(csv.out_stream);
@ -441,8 +502,10 @@ void RunOutputTests(int argc, char* argv[]) {
#pragma GCC diagnostic pop #pragma GCC diagnostic pop
#endif #endif
int SubstrCnt(const std::string& haystack, const std::string& pat) { int SubstrCnt(const std::string &haystack, const std::string &pat)
if (pat.length() == 0) return 0; {
if (pat.length() == 0)
return 0;
int count = 0; int count = 0;
for (size_t offset = haystack.find(pat); offset != std::string::npos; for (size_t offset = haystack.find(pat); offset != std::string::npos;
offset = haystack.find(pat, offset + pat.length())) offset = haystack.find(pat, offset + pat.length()))
@ -450,37 +513,43 @@ int SubstrCnt(const std::string& haystack, const std::string& pat) {
return count; return count;
} }
static char ToHex(int ch) { static char ToHex(int ch)
return ch < 10 ? static_cast<char>('0' + ch) {
: static_cast<char>('a' + (ch - 10)); return ch < 10 ? static_cast<char>('0' + ch) : static_cast<char>('a' + (ch - 10));
} }
static char RandomHexChar() { static char RandomHexChar()
{
static std::mt19937 rd{std::random_device{}()}; static std::mt19937 rd{std::random_device{}()};
static std::uniform_int_distribution<int> mrand{0, 15}; static std::uniform_int_distribution<int> mrand{0, 15};
return ToHex(mrand(rd)); return ToHex(mrand(rd));
} }
static std::string GetRandomFileName() { static std::string GetRandomFileName()
{
std::string model = "test.%%%%%%"; std::string model = "test.%%%%%%";
for (auto & ch : model) { for (auto &ch : model)
{
if (ch == '%') if (ch == '%')
ch = RandomHexChar(); ch = RandomHexChar();
} }
return model; return model;
} }
static bool FileExists(std::string const& name) { static bool FileExists(std::string const &name)
{
std::ifstream in(name.c_str()); std::ifstream in(name.c_str());
return in.good(); return in.good();
} }
static std::string GetTempFileName() { static std::string GetTempFileName()
{
// This function attempts to avoid race conditions where two tests // This function attempts to avoid race conditions where two tests
// create the same file at the same time. However, it still introduces races // create the same file at the same time. However, it still introduces races
// similar to tmpnam. // similar to tmpnam.
int retries = 3; int retries = 3;
while (--retries) { while (--retries)
{
std::string name = GetRandomFileName(); std::string name = GetRandomFileName();
if (!FileExists(name)) if (!FileExists(name))
return name; return name;
@ -489,8 +558,9 @@ static std::string GetTempFileName() {
std::abort(); std::abort();
} }
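As the comment concedes, the FileExists()-then-create-later pattern is inherently racy: another process can create the same name between the check and the use. A hedged, POSIX-only sketch of how the window could be closed by creating the file atomically with O_CREAT | O_EXCL; this is not what the helper does, only an illustration of the alternative:

#include <fcntl.h>  // open, O_CREAT, O_EXCL, O_WRONLY
#include <unistd.h> // close
#include <string>

// Returns true only if this call actually created the file, so two racing
// callers can never both claim the same name.
static bool CreateExclusively(const std::string &name)
{
    int fd = ::open(name.c_str(), O_CREAT | O_EXCL | O_WRONLY, 0600);
    if (fd < 0)
        return false; // already exists (or failed): caller should try another name
    ::close(fd);
    return true;
}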
std::string GetFileReporterOutput(int argc, char* argv[]) { std::string GetFileReporterOutput(int argc, char *argv[])
std::vector<char*> new_argv(argv, argv + argc); {
std::vector<char *> new_argv(argv, argv + argc);
assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size()); assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
std::string tmp_file_name = GetTempFileName(); std::string tmp_file_name = GetTempFileName();
@ -498,7 +568,7 @@ std::string GetFileReporterOutput(int argc, char* argv[]) {
std::string tmp = "--benchmark_out="; std::string tmp = "--benchmark_out=";
tmp += tmp_file_name; tmp += tmp_file_name;
new_argv.emplace_back(const_cast<char*>(tmp.c_str())); new_argv.emplace_back(const_cast<char *>(tmp.c_str()));
argc = int(new_argv.size()); argc = int(new_argv.size());
@ -507,8 +577,7 @@ std::string GetFileReporterOutput(int argc, char* argv[]) {
// Read the output back from the file, and delete the file. // Read the output back from the file, and delete the file.
std::ifstream tmp_stream(tmp_file_name); std::ifstream tmp_stream(tmp_file_name);
std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)), std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)), std::istreambuf_iterator<char>());
std::istreambuf_iterator<char>());
std::remove(tmp_file_name.c_str()); std::remove(tmp_file_name.c_str());
return output; return output;
View File
@ -6,11 +6,14 @@
#include "../src/check.h" // NOTE: check.h is for internal use only! #include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace { namespace
{
class TestReporter : public benchmark::ConsoleReporter { class TestReporter : public benchmark::ConsoleReporter
{
public: public:
virtual void ReportRuns(const std::vector<Run>& report) { virtual void ReportRuns(const std::vector<Run> &report)
{
all_runs_.insert(all_runs_.end(), begin(report), end(report)); all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report); ConsoleReporter::ReportRuns(report);
} }
@ -18,17 +21,22 @@ class TestReporter : public benchmark::ConsoleReporter {
std::vector<Run> all_runs_; std::vector<Run> all_runs_;
}; };
struct TestCase { struct TestCase
{
std::string name; std::string name;
const char* label; const char *label;
// Note: not explicit as we rely on it being converted through ADD_CASES. // Note: not explicit as we rely on it being converted through ADD_CASES.
TestCase(const char* xname) : TestCase(xname, nullptr) {} TestCase(const char *xname) : TestCase(xname, nullptr)
TestCase(const char* xname, const char* xlabel) {
: name(xname), label(xlabel) {} }
TestCase(const char *xname, const char *xlabel) : name(xname), label(xlabel)
{
}
typedef benchmark::BenchmarkReporter::Run Run; typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const { void CheckRun(Run const &run) const
{
// clang-format off // clang-format off
CHECK(name == run.benchmark_name()) << "expected " << name << " got " CHECK(name == run.benchmark_name()) << "expected " << name << " got "
<< run.benchmark_name(); << run.benchmark_name();
@ -44,8 +52,10 @@ struct TestCase {
std::vector<TestCase> ExpectedResults; std::vector<TestCase> ExpectedResults;
int AddCases(std::initializer_list<TestCase> const& v) { int AddCases(std::initializer_list<TestCase> const &v)
for (auto N : v) { {
for (auto N : v)
{
ExpectedResults.push_back(N); ExpectedResults.push_back(N);
} }
return 0; return 0;
@ -57,18 +67,19 @@ int AddCases(std::initializer_list<TestCase> const& v) {
} // end namespace } // end namespace
typedef benchmark::internal::Benchmark* ReturnVal; typedef benchmark::internal::Benchmark *ReturnVal;
//----------------------------------------------------------------------------// //----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments // Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------// //----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) { void BM_function(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_function); BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark( ReturnVal dummy = benchmark::RegisterBenchmark("BM_function_manual_registration", BM_function);
"BM_function_manual_registration", BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"}); ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------// //----------------------------------------------------------------------------//
@ -78,15 +89,17 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------// //----------------------------------------------------------------------------//
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State& st, const char* label) { void BM_extra_args(benchmark::State &st, const char *label)
for (auto _ : st) { {
for (auto _ : st)
{
} }
st.SetLabel(label); st.SetLabel(label);
} }
int RegisterFromFunction() { int RegisterFromFunction()
std::pair<const char*, const char*> cases[] = { {
{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}}; std::pair<const char *, const char *> cases[] = {{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
for (auto const& c : cases) for (auto const &c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second); benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0; return 0;
} }
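Because RegisterBenchmark returns a benchmark::internal::Benchmark* (the ReturnVal typedef above), benchmarks registered at runtime can be configured with the same chained setters the BENCHMARK macro supports. A hedged sketch in the spirit of RegisterFromFunction, registering one case per size; it assumes variadic RegisterBenchmark is available and is illustrative only, since actually adding it to this test would change ExpectedResults:

// Illustrative: sizes would normally come from runtime configuration.
int RegisterSweep()
{
    for (int size : {8, 64, 512})
    {
        std::string name = "BM_extra_args/size:" + std::to_string(size);
        benchmark::RegisterBenchmark(name.c_str(), &BM_extra_args, "sweep")
            ->Iterations(10); // chained configuration on the returned pointer
    }
    return 0;
}
int sweep_dummy = RegisterSweep();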
@ -99,14 +112,18 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
// Test RegisterBenchmark with different callable types // Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------// //----------------------------------------------------------------------------//
struct CustomFixture { struct CustomFixture
void operator()(benchmark::State& st) { {
for (auto _ : st) { void operator()(benchmark::State &st)
{
for (auto _ : st)
{
} }
} }
}; };
void TestRegistrationAtRuntime() { void TestRegistrationAtRuntime()
{
#ifdef BENCHMARK_HAS_CXX11 #ifdef BENCHMARK_HAS_CXX11
{ {
CustomFixture fx; CustomFixture fx;
@ -116,9 +133,10 @@ void TestRegistrationAtRuntime() {
#endif #endif
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
{ {
const char* x = "42"; const char *x = "42";
auto capturing_lam = [=](benchmark::State& st) { auto capturing_lam = [=](benchmark::State &st) {
for (auto _ : st) { for (auto _ : st)
{
} }
st.SetLabel(x); st.SetLabel(x);
}; };
@ -130,7 +148,8 @@ void TestRegistrationAtRuntime() {
// Test that all benchmarks, registered either during static init or at runtime, // Test that all benchmarks, registered either during static init or at runtime,
// are run and the results are passed to the reporter. // are run and the results are passed to the reporter.
void RunTestOne() { void RunTestOne()
{
TestRegistrationAtRuntime(); TestRegistrationAtRuntime();
TestReporter test_reporter; TestReporter test_reporter;
@ -139,7 +158,8 @@ void RunTestOne() {
typedef benchmark::BenchmarkReporter::Run Run; typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin(); auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) { for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end()); assert(EB != ExpectedResults.end());
EB->CheckRun(run); EB->CheckRun(run);
++EB; ++EB;
@ -150,9 +170,9 @@ void RunTestOne() {
// Test that ClearRegisteredBenchmarks() clears all previously registered // Test that ClearRegisteredBenchmarks() clears all previously registered
// benchmarks. // benchmarks.
// Also test that new benchmarks can be registered and run afterwards. // Also test that new benchmarks can be registered and run afterwards.
void RunTestTwo() { void RunTestTwo()
assert(ExpectedResults.size() != 0 && {
"must have at least one registered benchmark"); assert(ExpectedResults.size() != 0 && "must have at least one registered benchmark");
ExpectedResults.clear(); ExpectedResults.clear();
benchmark::ClearRegisteredBenchmarks(); benchmark::ClearRegisteredBenchmarks();
@ -168,7 +188,8 @@ void RunTestTwo() {
typedef benchmark::BenchmarkReporter::Run Run; typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin(); auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) { for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end()); assert(EB != ExpectedResults.end());
EB->CheckRun(run); EB->CheckRun(run);
++EB; ++EB;
@ -176,7 +197,8 @@ void RunTestTwo() {
assert(EB == ExpectedResults.end()); assert(EB == ExpectedResults.end());
} }
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
{
benchmark::Initialize(&argc, argv); benchmark::Initialize(&argc, argv);
RunTestOne(); RunTestOne();
View File
@ -10,21 +10,23 @@
// reporter in the presence of ReportAggregatesOnly(). // reporter in the presence of ReportAggregatesOnly().
// We do not care about console output, the normal tests check that already. // We do not care about console output, the normal tests check that already.
void BM_SummaryRepeat(benchmark::State& state) { void BM_SummaryRepeat(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
{
const std::string output = GetFileReporterOutput(argc, argv); const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 || if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != {
1) {
std::cout << "Precondition mismatch. Expected to only find three " std::cout << "Precondition mismatch. Expected to only find three "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
View File
@ -9,38 +9,34 @@
// ---------------------- Testing Prologue Output -------------------------- // // ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= // // ========================================================================= //
ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next}, ADD_CASES(TC_ConsoleOut,
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, {{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, {"^[-]+$", MR_Next}});
{"^[-]+$", MR_Next}}); static int AddContextCases()
static int AddContextCases() { {
AddCases(TC_ConsoleErr, AddCases(TC_ConsoleErr, {
{
{"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default}, {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next}, {"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next}, {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
}); });
AddCases(TC_JSONOut, AddCases(TC_JSONOut, {{"^\\{", MR_Default},
{{"^\\{", MR_Default},
{"\"context\":", MR_Next}, {"\"context\":", MR_Next},
{"\"date\": \"", MR_Next}, {"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next}, {"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", MR_Next},
MR_Next},
{"\"num_cpus\": %int,$", MR_Next}, {"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next}, {"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next}, {"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}}); {"\"caches\": \\[$", MR_Next}});
auto const& Info = benchmark::CPUInfo::Get(); auto const &Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches; auto const &Caches = Info.caches;
if (!Caches.empty()) { if (!Caches.empty())
{
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}}); AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
} }
for (size_t I = 0; I < Caches.size(); ++I) { for (size_t I = 0; I < Caches.size(); ++I)
std::string num_caches_str = {
Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$"; std::string num_caches_str = Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
AddCases(TC_ConsoleErr, AddCases(TC_ConsoleErr, {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, MR_Next}});
{{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next}, AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next}, {"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next}, {"\"level\": %int,$", MR_Next},
@ -49,10 +45,10 @@ static int AddContextCases() {
{"}[,]{0,1}$", MR_Next}}); {"}[,]{0,1}$", MR_Next}});
} }
AddCases(TC_JSONOut, {{"],$"}}); AddCases(TC_JSONOut, {{"],$"}});
auto const& LoadAvg = Info.load_avg; auto const &LoadAvg = Info.load_avg;
if (!LoadAvg.empty()) { if (!LoadAvg.empty())
AddCases(TC_ConsoleErr, {
{{"Load Average: (%float, ){0,2}%float$", MR_Next}}); AddCases(TC_ConsoleErr, {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
} }
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}}); AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0; return 0;
@ -64,8 +60,10 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ------------------------ Testing Basic Output --------------------------- // // ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_basic(benchmark::State& state) { void BM_basic(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_basic); BENCHMARK(BM_basic);
@ -88,8 +86,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ------------------------ Testing Bytes per Second Output ---------------- // // ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= // // ========================================================================= //
void BM_bytes_per_second(benchmark::State& state) { void BM_bytes_per_second(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -117,8 +117,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ------------------------ Testing Items per Second Output ---------------- // // ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= // // ========================================================================= //
void BM_items_per_second(benchmark::State& state) { void BM_items_per_second(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -146,8 +148,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ------------------------ Testing Label Output --------------------------- // // ------------------------ Testing Label Output --------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_label(benchmark::State& state) { void BM_label(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
state.SetLabel("some label"); state.SetLabel("some label");
} }
@ -173,9 +177,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
// ------------------------ Testing Error Output --------------------------- // // ------------------------ Testing Error Output --------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_error(benchmark::State& state) { void BM_error(benchmark::State &state)
{
state.SkipWithError("message"); state.SkipWithError("message");
for (auto _ : state) { for (auto _ : state)
{
} }
} }
BENCHMARK(BM_error); BENCHMARK(BM_error);
@ -196,8 +202,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// // // //
// ========================================================================= // // ========================================================================= //
void BM_no_arg_name(benchmark::State& state) { void BM_no_arg_name(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_no_arg_name)->Arg(3); BENCHMARK(BM_no_arg_name)->Arg(3);
@ -214,8 +222,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ------------------------ Testing Arg Name Output ----------------------- // // ------------------------ Testing Arg Name Output ----------------------- //
// ========================================================================= // // ========================================================================= //
void BM_arg_name(benchmark::State& state) { void BM_arg_name(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
@ -232,15 +242,15 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ------------------------ Testing Arg Names Output ----------------------- // // ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= // // ========================================================================= //
void BM_arg_names(benchmark::State& state) { void BM_arg_names(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next}, {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -252,44 +262,46 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ------------------------ Testing Big Args Output ------------------------ // // ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_BigArgs(benchmark::State& state) { void BM_BigArgs(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U); BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, {"^BM_BigArgs/2147483648 %console_report$"}});
{"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= // // ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- // // ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= // // ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) { void BM_Complexity_O1(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
state.SetComplexityN(state.range(0)); state.SetComplexityN(state.range(0));
} }
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, {"%RMS", "[ ]*[0-9]+ %"}});
{"%RMS", "[ ]*[0-9]+ %"}}); ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
{"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= // // ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ // // ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= // // ========================================================================= //
// Test that non-aggregate data is printed by default // Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) { void BM_Repeat(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
// need at least two repetitions to be able to output any aggregate output // need at least two repetitions to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2); BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
{{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"}, {"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"}, {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"}, {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
@ -334,8 +346,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, mean and median are the same, so let's repeat... // but for two repetitions, mean and median are the same, so let's repeat...
BENCHMARK(BM_Repeat)->Repetitions(3); BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"}, {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
@ -388,8 +399,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// median differs between even and odd numbers of repetitions, so just to be sure // median differs between even and odd numbers of repetitions, so just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4); BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
{{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"}, {"^BM_Repeat/repeats:4 %console_report$"},
@ -451,8 +461,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
// Test that a non-repeated test still prints non-aggregate results even when // Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested // only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) { void BM_RepeatOnce(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
@ -466,19 +478,18 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported // Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) { void BM_SummaryRepeat(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES( ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"}, {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"}, {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
@ -508,19 +519,18 @@ ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
// Test that non-aggregate data is not displayed. // Test that non-aggregate data is not displayed.
// NOTE: this test is kinda bad. we are only testing the display output. // NOTE: this test is kinda bad. we are only testing the display output.
// But we don't check that the file output still contains everything... // But we don't check that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State& state) { void BM_SummaryDisplay(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly(); BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
ADD_CASES( ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
TC_ConsoleOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"}, {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"}, {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}}); {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"}, {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
@ -542,31 +552,26 @@ ADD_CASES(TC_JSONOut,
{"\"threads\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}}); {"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, ADD_CASES(TC_CSVOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"}, {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"}, {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}}); {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
// Test repeats with custom time unit. // Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) { void BM_RepeatTimeUnit(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
} }
BENCHMARK(BM_RepeatTimeUnit) BENCHMARK(BM_RepeatTimeUnit)->Repetitions(3)->ReportAggregatesOnly()->Unit(benchmark::kMicrosecond);
->Repetitions(3) ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
ADD_CASES(
TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"}, {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ " {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"}, "]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ " {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}}); "]*3$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
@ -591,8 +596,7 @@ ADD_CASES(TC_JSONOut,
{"\"aggregate_name\": \"stddev\",$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}}); {"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut, ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"}, {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"}, {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}}); {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
@ -601,11 +605,11 @@ ADD_CASES(TC_CSVOut,
// -------------------- Testing user-provided statistics ------------------- // // -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= // // ========================================================================= //
const auto UserStatistics = [](const std::vector<double>& v) { const auto UserStatistics = [](const std::vector<double> &v) { return v.back(); };
return v.back(); void BM_UserStats(benchmark::State &state)
}; {
void BM_UserStats(benchmark::State& state) { for (auto _ : state)
for (auto _ : state) { {
state.SetIterationTime(150 / 10e8); state.SetIterationTime(150 / 10e8);
} }
} }
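The registration for BM_UserStats falls outside this hunk; for context, a benchmark shaped like this is typically wired up with fixed iteration and repetition counts, manual timing, and the user statistic above, roughly as follows (a hedged sketch, not necessarily the exact call in this file):

// Illustrative registration: fixed counts, manual timing, and a custom
// aggregate computed from the three per-repetition results.
BENCHMARK(BM_UserStats)
    ->Iterations(5)
    ->Repetitions(3)
    ->UseManualTime()
    ->ComputeStatistics("", UserStatistics);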
@ -633,11 +637,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ " {"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}}); "[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES( ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
TC_JSONOut, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
@ -645,8 +646,7 @@ ADD_CASES(
{"\"iterations\": 5,$", MR_Next}, {"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next}, {"\"repetition_index\": 1,$", MR_Next},
@ -654,8 +654,7 @@ ADD_CASES(
{"\"iterations\": 5,$", MR_Next}, {"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next}, {"\"repetition_index\": 2,$", MR_Next},
@ -663,8 +662,7 @@ ADD_CASES(
{"\"iterations\": 5,$", MR_Next}, {"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
@ -672,8 +670,7 @@ ADD_CASES(
{"\"iterations\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
@ -681,8 +678,7 @@ ADD_CASES(
{"\"iterations\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
@ -690,17 +686,14 @@ ADD_CASES(
{"\"iterations\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next}, {"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next}, {"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next}, {"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES( ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
TC_CSVOut,
{{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"}, {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
@ -733,9 +726,11 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
// -------------------------- Testing CsvEscape ---------------------------- // // -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_CSV_Format(benchmark::State& state) { void BM_CSV_Format(benchmark::State &state)
{
state.SkipWithError("\"freedom\""); state.SkipWithError("\"freedom\"");
for (auto _ : state) { for (auto _ : state)
{
} }
} }
BENCHMARK(BM_CSV_Format); BENCHMARK(BM_CSV_Format);
@ -745,4 +740,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
// --------------------------- TEST CASES END ------------------------------ // // --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= // // ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}
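For reference, a minimal sketch of the kind of benchmark the BM_UserStats expectations above are matched against, assuming only the documented Google Benchmark API (UseManualTime, SetIterationTime, Iterations, Repetitions, ComputeStatistics); the statistic body and the fixed 150 ns value are illustrative, not taken from this diff:

#include "benchmark/benchmark.h"

static void BM_UserStats(benchmark::State &state) {
  for (auto _ : state) {
    state.SetIterationTime(150e-9);  // report a fixed 150 ns per iteration as the manual time
  }
}
// 5 iterations x 3 repetitions plus a user statistic with an empty name,
// which the reporters print with a trailing underscore ("manual_time_").
BENCHMARK(BM_UserStats)
    ->Iterations(5)
    ->Repetitions(3)
    ->UseManualTime()
    ->ComputeStatistics("", [](const std::vector<double> &v) { return v.back(); });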
View File
@ -6,40 +6,52 @@
#include "../src/check.h" // NOTE: check.h is for internal use only! #include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h" #include "benchmark/benchmark.h"
namespace { namespace
{
class TestReporter : public benchmark::ConsoleReporter { class TestReporter : public benchmark::ConsoleReporter
{
public: public:
virtual bool ReportContext(const Context& context) { virtual bool ReportContext(const Context &context)
{
return ConsoleReporter::ReportContext(context); return ConsoleReporter::ReportContext(context);
}; };
virtual void ReportRuns(const std::vector<Run>& report) { virtual void ReportRuns(const std::vector<Run> &report)
{
all_runs_.insert(all_runs_.end(), begin(report), end(report)); all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report); ConsoleReporter::ReportRuns(report);
} }
TestReporter() {} TestReporter()
virtual ~TestReporter() {} {
}
virtual ~TestReporter()
{
}
mutable std::vector<Run> all_runs_; mutable std::vector<Run> all_runs_;
}; };
struct TestCase { struct TestCase
{
std::string name; std::string name;
bool error_occurred; bool error_occurred;
std::string error_message; std::string error_message;
typedef benchmark::BenchmarkReporter::Run Run; typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const { void CheckRun(Run const &run) const
CHECK(name == run.benchmark_name()) {
<< "expected " << name << " got " << run.benchmark_name(); CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred); CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message); CHECK(error_message == run.error_message);
if (error_occurred) { if (error_occurred)
{
// CHECK(run.iterations == 0); // CHECK(run.iterations == 0);
} else { }
else
{
CHECK(run.iterations != 0); CHECK(run.iterations != 0);
} }
} }
@ -47,8 +59,10 @@ struct TestCase {
std::vector<TestCase> ExpectedResults; std::vector<TestCase> ExpectedResults;
int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) { int AddCases(const char *base_name, std::initializer_list<TestCase> const &v)
for (auto TC : v) { {
for (auto TC : v)
{
TC.name = base_name + TC.name; TC.name = base_name + TC.name;
ExpectedResults.push_back(std::move(TC)); ExpectedResults.push_back(std::move(TC));
} }
@ -61,47 +75,59 @@ int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
} // end namespace } // end namespace
void BM_error_no_running(benchmark::State& state) { void BM_error_no_running(benchmark::State &state)
{
state.SkipWithError("error message"); state.SkipWithError("error message");
} }
BENCHMARK(BM_error_no_running); BENCHMARK(BM_error_no_running);
ADD_CASES("BM_error_no_running", {{"", true, "error message"}}); ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
void BM_error_before_running(benchmark::State& state) { void BM_error_before_running(benchmark::State &state)
{
state.SkipWithError("error message"); state.SkipWithError("error message");
while (state.KeepRunning()) { while (state.KeepRunning())
{
assert(false); assert(false);
} }
} }
BENCHMARK(BM_error_before_running); BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
void BM_error_before_running_batch(benchmark::State& state) { void BM_error_before_running_batch(benchmark::State &state)
{
state.SkipWithError("error message"); state.SkipWithError("error message");
while (state.KeepRunningBatch(17)) { while (state.KeepRunningBatch(17))
{
assert(false); assert(false);
} }
} }
BENCHMARK(BM_error_before_running_batch); BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}}); ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
void BM_error_before_running_range_for(benchmark::State& state) { void BM_error_before_running_range_for(benchmark::State &state)
{
state.SkipWithError("error message"); state.SkipWithError("error message");
for (auto _ : state) { for (auto _ : state)
{
assert(false); assert(false);
} }
} }
BENCHMARK(BM_error_before_running_range_for); BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
void BM_error_during_running(benchmark::State& state) { void BM_error_during_running(benchmark::State &state)
{
int first_iter = true; int first_iter = true;
while (state.KeepRunning()) { while (state.KeepRunning())
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
{
assert(first_iter); assert(first_iter);
first_iter = false; first_iter = false;
state.SkipWithError("error message"); state.SkipWithError("error message");
} else { }
else
{
state.PauseTiming(); state.PauseTiming();
state.ResumeTiming(); state.ResumeTiming();
} }
@ -117,12 +143,15 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""}, {"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}}); {"/2/threads:8", false, ""}});
void BM_error_during_running_ranged_for(benchmark::State& state) { void BM_error_during_running_ranged_for(benchmark::State &state)
{
assert(state.max_iterations > 3 && "test requires at least a few iterations"); assert(state.max_iterations > 3 && "test requires at least a few iterations");
int first_iter = true; int first_iter = true;
// NOTE: Users should not write the for loop explicitly. // NOTE: Users should not write the for loop explicitly.
for (auto It = state.begin(), End = state.end(); It != End; ++It) { for (auto It = state.begin(), End = state.end(); It != End; ++It)
if (state.range(0) == 1) { {
if (state.range(0) == 1)
{
assert(first_iter); assert(first_iter);
first_iter = false; first_iter = false;
state.SkipWithError("error message"); state.SkipWithError("error message");
@ -135,11 +164,12 @@ void BM_error_during_running_ranged_for(benchmark::State& state) {
} }
BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5); BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
ADD_CASES("BM_error_during_running_ranged_for", ADD_CASES("BM_error_during_running_ranged_for",
{{"/1/iterations:5", true, "error message"}, {{"/1/iterations:5", true, "error message"}, {"/2/iterations:5", false, ""}});
{"/2/iterations:5", false, ""}});
void BM_error_after_running(benchmark::State& state) { void BM_error_after_running(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
if (state.thread_index <= (state.threads / 2)) if (state.thread_index <= (state.threads / 2))
@ -151,15 +181,20 @@ ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
{"/threads:4", true, "error message"}, {"/threads:4", true, "error message"},
{"/threads:8", true, "error message"}}); {"/threads:8", true, "error message"}});
void BM_error_while_paused(benchmark::State& state) { void BM_error_while_paused(benchmark::State &state)
{
bool first_iter = true; bool first_iter = true;
while (state.KeepRunning()) { while (state.KeepRunning())
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
{
assert(first_iter); assert(first_iter);
first_iter = false; first_iter = false;
state.PauseTiming(); state.PauseTiming();
state.SkipWithError("error message"); state.SkipWithError("error message");
} else { }
else
{
state.PauseTiming(); state.PauseTiming();
state.ResumeTiming(); state.ResumeTiming();
} }
@ -175,7 +210,8 @@ ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""}, {"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}}); {"/2/threads:8", false, ""}});
int main(int argc, char* argv[]) { int main(int argc, char *argv[])
{
benchmark::Initialize(&argc, argv); benchmark::Initialize(&argc, argv);
TestReporter test_reporter; TestReporter test_reporter;
@ -184,7 +220,8 @@ int main(int argc, char* argv[]) {
typedef benchmark::BenchmarkReporter::Run Run; typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin(); auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) { for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end()); assert(EB != ExpectedResults.end());
EB->CheckRun(run); EB->CheckRun(run);
++EB; ++EB;
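The hunks above only reflow skip_with_error_test.cc; the pattern it exercises is the documented SkipWithError() idiom, sketched below with an illustrative precondition rather than anything from this repository:

static void BM_SkipWhenUnsupported(benchmark::State &state) {
  bool supported = (state.range(0) != 0);  // illustrative precondition
  if (!supported) {
    state.SkipWithError("error message");
    return;  // no iterations run; the run is reported with error_occurred == true
  }
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_SkipWhenUnsupported)->Arg(0)->Arg(1);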
View File
@ -15,14 +15,16 @@ extern "C" {
using benchmark::State; using benchmark::State;
// CHECK-LABEL: test_for_auto_loop: // CHECK-LABEL: test_for_auto_loop:
extern "C" int test_for_auto_loop() { extern "C" int test_for_auto_loop()
State& S = GetState(); {
State &S = GetState();
int x = 42; int x = 42;
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv // CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK-NEXT: testq %rbx, %rbx // CHECK-NEXT: testq %rbx, %rbx
// CHECK-NEXT: je [[LOOP_END:.*]] // CHECK-NEXT: je [[LOOP_END:.*]]
for (auto _ : S) { for (auto _ : S)
{
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]: // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx // CHECK-GNU-NEXT: subq $1, %rbx
// CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}} // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
@ -38,13 +40,15 @@ extern "C" int test_for_auto_loop() {
} }
// CHECK-LABEL: test_while_loop: // CHECK-LABEL: test_while_loop:
extern "C" int test_while_loop() { extern "C" int test_while_loop()
State& S = GetState(); {
State &S = GetState();
int x = 42; int x = 42;
// CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]] // CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
// CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]: // CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
while (S.KeepRunning()) { while (S.KeepRunning())
{
// CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]] // CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
// CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]] // CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
// CHECK: movq %[[IREG]], [[DEST:.*]] // CHECK: movq %[[IREG]], [[DEST:.*]]
View File
@ -5,24 +5,27 @@
#include "../src/statistics.h" #include "../src/statistics.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace { namespace
TEST(StatisticsTest, Mean) { {
TEST(StatisticsTest, Mean)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5); EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
} }
TEST(StatisticsTest, Median) { TEST(StatisticsTest, Median)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5); EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
} }
TEST(StatisticsTest, StdDev) { TEST(StatisticsTest, StdDev)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0); EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), 1.151086443322134);
1.151086443322134);
} }
} // end namespace } // end namespace
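The expected values in StatisticsTest above correspond to the sample (Bessel-corrected) standard deviation; a small self-contained check, not repository code, reproduces them:

#include <cmath>
#include <cstdio>
#include <vector>

static double SampleStdDev(const std::vector<double> &v) {
  double mean = 0.0;
  for (double x : v) mean += x;
  mean /= v.size();
  double ss = 0.0;
  for (double x : v) ss += (x - mean) * (x - mean);
  return std::sqrt(ss / (v.size() - 1));  // divide by N-1, not N
}

int main() {
  std::printf("%.15f\n", SampleStdDev({1, 2, 3}));                 // 1.000000000000000
  std::printf("%.15f\n", SampleStdDev({2.5, 2.4, 3.3, 4.2, 5.1})); // ~1.151086443322134
}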
View File
@ -2,12 +2,14 @@
// statistics_test - Unit tests for src/statistics.cc // statistics_test - Unit tests for src/statistics.cc
//===---------------------------------------------------------------------===// //===---------------------------------------------------------------------===//
#include "../src/string_util.h"
#include "../src/internal_macros.h" #include "../src/internal_macros.h"
#include "../src/string_util.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace { namespace
TEST(StringUtilTest, stoul) { {
TEST(StringUtilTest, stoul)
{
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(0ul, benchmark::stoul("0", &pos)); EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
@ -68,55 +70,54 @@ TEST(StringUtilTest, stoul) {
#endif #endif
} }
TEST(StringUtilTest, stoi) { TEST(StringUtilTest, stoi){{size_t pos = 0;
{ EXPECT_EQ(0, benchmark::stoi("0", &pos));
size_t pos = 0; EXPECT_EQ(1ul, pos);
EXPECT_EQ(0, benchmark::stoi("0", &pos)); } // namespace
EXPECT_EQ(1ul, pos); {
}
{
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
EXPECT_EQ(3ul, pos); EXPECT_EQ(3ul, pos);
} }
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
EXPECT_EQ(4ul, pos); EXPECT_EQ(4ul, pos);
} }
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
EXPECT_EQ(4ul, pos); EXPECT_EQ(4ul, pos);
} }
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
EXPECT_EQ(4ul, pos); EXPECT_EQ(4ul, pos);
} }
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
EXPECT_EQ(4ul, pos); EXPECT_EQ(4ul, pos);
} }
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
EXPECT_EQ(4ul, pos); EXPECT_EQ(4ul, pos);
} }
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos); EXPECT_EQ(4ul, pos);
} }
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS #ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{ {
ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
} }
#endif #endif
} }
TEST(StringUtilTest, stod) { TEST(StringUtilTest, stod)
{
{ {
size_t pos = 0; size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos)); EXPECT_EQ(0.0, benchmark::stod("0", &pos));
View File
@ -4,22 +4,28 @@
#include <cassert> #include <cassert>
#include <memory> #include <memory>
template <typename T> template <typename T> class MyFixture : public ::benchmark::Fixture
class MyFixture : public ::benchmark::Fixture { {
public: public:
MyFixture() : data(0) {} MyFixture() : data(0)
{
}
T data; T data;
}; };
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) { BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st)
for (auto _ : st) { {
for (auto _ : st)
{
data += 1; data += 1;
} }
} }
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) { BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State &st)
for (auto _ : st) { {
for (auto _ : st)
{
data += 1.0; data += 1.0;
} }
} }
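BENCHMARK_TEMPLATE_F above both defines and registers MyFixture<int>::Foo, while BENCHMARK_TEMPLATE_DEFINE_F only defines MyFixture<double>::Bar; with the standard macros the latter is registered separately, roughly as follows (the registration line falls outside the hunks shown here):

BENCHMARK_REGISTER_F(MyFixture, Bar);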
View File
@ -7,8 +7,7 @@
// @todo: <jpmag> this checks the full output at once; the rule for // @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$". // CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned. // @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES( ADD_CASES(TC_ConsoleOut,
TC_ConsoleOut,
{ {
// keeping these lines long improves readability, so: // keeping these lines long improves readability, so:
// clang-format off // clang-format off
@ -55,8 +54,10 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
// ------------------------- Tabular Counters Output ----------------------- // // ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= // // ========================================================================= //
void BM_Counters_Tabular(benchmark::State& state) { void BM_Counters_Tabular(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters.insert({ state.counters.insert({
@ -69,8 +70,7 @@ void BM_Counters_Tabular(benchmark::State& state) {
}); });
} }
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
{{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -91,7 +91,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}}); "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) { void CheckTabular(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1); CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2); CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4); CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
@ -105,8 +106,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
// -------------------- Tabular+Rate Counters Output ----------------------- // // -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= // // ========================================================================= //
void BM_CounterRates_Tabular(benchmark::State& state) { void BM_CounterRates_Tabular(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -121,10 +124,8 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
}); });
} }
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", MR_Next},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
@ -144,7 +145,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}}); "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) { void CheckTabularRate(Results const &e)
{
double t = e.DurationCPUTime(); double t = e.DurationCPUTime();
CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
@ -153,16 +155,17 @@ void CheckTabularRate(Results const& e) {
CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", &CheckTabularRate);
&CheckTabularRate);
// ========================================================================= // // ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- // // ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= // // ========================================================================= //
// set only some of the counters // set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) { void BM_CounterSet0_Tabular(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters.insert({ state.counters.insert({
@ -172,8 +175,7 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
}); });
} }
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next}, {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -191,7 +193,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}}); "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const& e) { void CheckSet0(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20); CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
@ -199,8 +202,10 @@ void CheckSet0(Results const& e) {
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0); CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
// again. // again.
void BM_CounterSet1_Tabular(benchmark::State& state) { void BM_CounterSet1_Tabular(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters.insert({ state.counters.insert({
@ -210,8 +215,7 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
}); });
} }
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next}, {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -229,7 +233,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}}); "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const& e) { void CheckSet1(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15); CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25); CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45); CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
@ -241,8 +246,10 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
// ========================================================================= // // ========================================================================= //
// set only some of the counters, different set now. // set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) { void BM_CounterSet2_Tabular(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters.insert({ state.counters.insert({
@ -252,8 +259,7 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
}); });
} }
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next}, {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -271,7 +277,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
",%float,%float,%float,,"}}); ",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const& e) { void CheckSet2(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10); CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30); CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40); CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
@ -282,4 +289,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
// --------------------------- TEST CASES END ------------------------------ // // --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= // // ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}
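For reference, a hedged sketch of the counter-insertion pattern these tabular tests format, using the documented benchmark::Counter flags; the counter names and values are illustrative:

static void BM_TabularExample(benchmark::State &state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());  // keep CPU time non-zero for rate counters
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {1, bm::Counter::kDefaults}},              // reported as-is
      {"Bar", {2, bm::Counter::kIsRate}},                // divided by elapsed CPU time
      {"Baz", {4, bm::Counter::kAvgThreads}},            // averaged across threads
      {"Bat", {8, bm::Counter::kIsIterationInvariant}},  // multiplied by the iteration count
  });
}
BENCHMARK(BM_TabularExample)->ThreadRange(1, 16);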
View File
@ -22,15 +22,16 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// ------------------------- Simple Counters Output ------------------------ // // ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_Simple(benchmark::State& state) { void BM_Counters_Simple(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
state.counters["foo"] = 1; state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations(); state.counters["bar"] = 2 * (double)state.iterations();
} }
BENCHMARK(BM_Counters_Simple); BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
{{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
{"\"run_name\": \"BM_Counters_Simple\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
@ -47,7 +48,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const& e) { void CheckSimple(Results const &e)
{
double its = e.NumIterations(); double its = e.NumIterations();
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
// check that the value of bar is within 0.1% of the expected value // check that the value of bar is within 0.1% of the expected value
@ -59,11 +61,14 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
// --------------------- Counters+Items+Bytes/s Output --------------------- // // --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= // // ========================================================================= //
namespace { namespace
{
int num_calls1 = 0; int num_calls1 = 0;
} }
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { void BM_Counters_WithBytesAndItemsPSec(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -76,8 +81,7 @@ BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
"bar=%hrfloat bytes_per_second=%hrfloat/s " "bar=%hrfloat bytes_per_second=%hrfloat/s "
"foo=%hrfloat items_per_second=%hrfloat/s$"}}); "foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next}, {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -96,7 +100,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
"%csv_bytes_items_report,%float,%float$"}}); "%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const& e) { void CheckBytesAndItemsPSec(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used double t = e.DurationCPUTime(); // this (and not real time) is the time used
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1); CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
@ -104,15 +109,16 @@ void CheckBytesAndItemsPSec(Results const& e) {
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001); CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001); CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", &CheckBytesAndItemsPSec);
&CheckBytesAndItemsPSec);
// ========================================================================= // // ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- // // ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_Counters_Rate(benchmark::State& state) { void BM_Counters_Rate(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -121,9 +127,7 @@ void BM_Counters_Rate(benchmark::State& state) {
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate}; state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
} }
BENCHMARK(BM_Counters_Rate); BENCHMARK(BM_Counters_Rate);
ADD_CASES( ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
TC_ConsoleOut,
{{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
{"\"run_name\": \"BM_Counters_Rate\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
@ -140,7 +144,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const& e) { void CheckRate(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values // check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
@ -152,8 +157,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ----------------------- Inverted Counters Output ------------------------ // // ----------------------- Inverted Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Invert(benchmark::State& state) { void BM_Invert(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -162,8 +169,7 @@ void BM_Invert(benchmark::State& state) {
state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert}; state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
} }
BENCHMARK(BM_Invert); BENCHMARK(BM_Invert);
ADD_CASES(TC_ConsoleOut, ADD_CASES(TC_ConsoleOut, {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
{{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"}, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
{"\"run_name\": \"BM_Invert\",$", MR_Next}, {"\"run_name\": \"BM_Invert\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
@ -180,7 +186,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckInvert(Results const& e) { void CheckInvert(Results const &e)
{
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001); CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
} }
@ -191,22 +198,21 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
// -------------------------- // // -------------------------- //
// ========================================================================= // // ========================================================================= //
void BM_Counters_InvertedRate(benchmark::State& state) { void BM_Counters_InvertedRate(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters["foo"] = state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert}; state.counters["bar"] = bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["bar"] =
bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
} }
BENCHMARK(BM_Counters_InvertedRate); BENCHMARK(BM_Counters_InvertedRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
"bar=%hrfloats foo=%hrfloats$"}}); "bar=%hrfloats foo=%hrfloats$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_InvertedRate\",$"},
{{"\"name\": \"BM_Counters_InvertedRate\",$"},
{"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next}, {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -219,11 +225,11 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
ADD_CASES(TC_CSVOut, ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
{{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckInvertedRate(Results const& e) { void CheckInvertedRate(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values // check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
@ -235,8 +241,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
// ------------------------- Thread Counters Output ------------------------ // // ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_Threads(benchmark::State& state) { void BM_Counters_Threads(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
state.counters["foo"] = 1; state.counters["foo"] = 1;
state.counters["bar"] = 2; state.counters["bar"] = 2;
@ -244,8 +252,7 @@ void BM_Counters_Threads(benchmark::State& state) {
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
"bar=%hrfloat foo=%hrfloat$"}}); "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -258,12 +265,11 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
ADD_CASES( ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
TC_CSVOut,
{{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const& e) { void CheckThreads(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads()); CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads()); CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
} }
@ -273,8 +279,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ---------------------- ThreadAvg Counters Output ------------------------ // // ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) { void BM_Counters_AvgThreads(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads}; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
@ -283,8 +291,7 @@ void BM_Counters_AvgThreads(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}}); "%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next}, {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -297,24 +304,24 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
ADD_CASES( ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
TC_CSVOut,
{{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) { void CheckAvgThreads(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2); CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", &CheckAvgThreads);
&CheckAvgThreads);
// ========================================================================= // // ========================================================================= //
// ---------------------- ThreadAvg Counters Output ------------------------ // // ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) { void BM_Counters_AvgThreadsRate(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
@ -325,10 +332,8 @@ void BM_Counters_AvgThreadsRate(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", MR_Next},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
@ -344,19 +349,21 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
"threads:%int\",%csv_report,%float,%float$"}}); "threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) { void CheckAvgThreadsRate(Results const &e)
{
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", &CheckAvgThreadsRate);
&CheckAvgThreadsRate);
// ========================================================================= // // ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ // // ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= // // ========================================================================= //
void BM_Counters_IterationInvariant(benchmark::State& state) { void BM_Counters_IterationInvariant(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant}; state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
@ -365,8 +372,7 @@ void BM_Counters_IterationInvariant(benchmark::State& state) {
BENCHMARK(BM_Counters_IterationInvariant); BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}}); "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next}, {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -379,41 +385,38 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
ADD_CASES(TC_CSVOut, ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
{{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) { void CheckIterationInvariant(Results const &e)
{
double its = e.NumIterations(); double its = e.NumIterations();
// check that the values are within 0.1% of the expected value // check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", &CheckIterationInvariant);
&CheckIterationInvariant);
// ========================================================================= // // ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- // // ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= // // ========================================================================= //
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { void BM_Counters_kIsIterationInvariantRate(benchmark::State &state)
for (auto _ : state) { {
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters["foo"] = state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
bm::Counter{1, bm::Counter::kIsIterationInvariantRate}; state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
} }
BENCHMARK(BM_Counters_kIsIterationInvariantRate); BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
-           {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
-            MR_Next},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
+                      {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next},
@ -429,22 +432,24 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
"%float,%float$"}}); "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const& e) { void CheckIsIterationInvariantRate(Results const &e)
{
double its = e.NumIterations(); double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values // check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", &CheckIsIterationInvariantRate);
&CheckIsIterationInvariantRate);
// ========================================================================= // // ========================================================================= //
// ------------------- AvgIterations Counters Output ------------------ // // ------------------- AvgIterations Counters Output ------------------ //
// ========================================================================= // // ========================================================================= //
-void BM_Counters_AvgIterations(benchmark::State& state) {
-  for (auto _ : state) {
+void BM_Counters_AvgIterations(benchmark::State &state)
+{
+    for (auto _ : state)
+    {
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations}; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
@ -453,8 +458,7 @@ void BM_Counters_AvgIterations(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgIterations); BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
"bar=%hrfloat foo=%hrfloat$"}}); "bar=%hrfloat foo=%hrfloat$"}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_AvgIterations\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
{"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next}, {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -467,11 +471,11 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next}, {"\"foo\": %float$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
-ADD_CASES(TC_CSVOut,
-          {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const& e) { void CheckAvgIterations(Results const &e)
{
double its = e.NumIterations(); double its = e.NumIterations();
// check that the values are within 0.1% of the expected value // check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
@ -483,21 +487,21 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
// ----------------- AvgIterationsRate Counters Output ---------------- // // ----------------- AvgIterationsRate Counters Output ---------------- //
// ========================================================================= // // ========================================================================= //
-void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
-  for (auto _ : state) {
+void BM_Counters_kAvgIterationsRate(benchmark::State &state)
+{
+    for (auto _ : state)
+    {
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate}; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
state.counters["bar"] = state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
} }
BENCHMARK(BM_Counters_kAvgIterationsRate); BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate " ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
{"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next}, {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next}, {"\"repetitions\": 0,$", MR_Next},
@ -514,18 +518,21 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
"%float,%float$"}}); "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const& e) { void CheckAvgIterationsRate(Results const &e)
{
double its = e.NumIterations(); double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values // check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001); CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
} }
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", &CheckAvgIterationsRate);
&CheckAvgIterationsRate);
// ========================================================================= // // ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ // // --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= // // ========================================================================= //
-int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
+int main(int argc, char *argv[])
+{
+    RunOutputTests(argc, argv);
+}
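For readers unfamiliar with the counter flags exercised by the tests above, here is a minimal sketch of how Google Benchmark user counters are registered; the benchmark name BM_CounterDemo and the counter names are hypothetical and not part of this diff.

#include <benchmark/benchmark.h>
#include <cstdint>

static void BM_CounterDemo(benchmark::State &state)
{
    int64_t items = 0;
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(items += 1);
    }
    // kIsRate: reported as the value divided by elapsed CPU time.
    state.counters["items_per_sec"] = benchmark::Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
    // kIsIterationInvariant: multiplied by the iteration count before reporting.
    state.counters["per_iteration"] = benchmark::Counter(1, benchmark::Counter::kIsIterationInvariant);
    // kAvgIterations: divided by the iteration count, i.e. an average per iteration.
    state.counters["avg_per_iter"] = benchmark::Counter(static_cast<double>(items), benchmark::Counter::kAvgIterations);
}
BENCHMARK(BM_CounterDemo);
BENCHMARK_MAIN();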

View File

@ -8,27 +8,22 @@
// ------------------------ Thousands Customisation ------------------------ // // ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= // // ========================================================================= //
-void BM_Counters_Thousands(benchmark::State& state) {
-  for (auto _ : state) {
+void BM_Counters_Thousands(benchmark::State &state)
+{
+    for (auto _ : state)
+    {
} }
namespace bm = benchmark; namespace bm = benchmark;
state.counters.insert({ state.counters.insert({
{"t0_1000000DefaultBase", {"t0_1000000DefaultBase", bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
bm::Counter(1000 * 1000, bm::Counter::kDefaults)}, {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
{"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
benchmark::Counter::OneK::kIs1000)}, {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
{"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
benchmark::Counter::OneK::kIs1024)},
{"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
}); });
} }
BENCHMARK(BM_Counters_Thousands)->Repetitions(2); BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
-ADD_CASES(
-    TC_ConsoleOut,
-    {
+ADD_CASES(TC_ConsoleOut, {
{"^BM_Counters_Thousands/repeats:2 %console_report " {"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k " "t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
@ -49,8 +44,7 @@ ADD_CASES(
"]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 " "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
"t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"}, "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
}); });
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next}, {"\"repetitions\": 2,$", MR_Next},
@ -66,8 +60,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next}, {"\"repetitions\": 2,$", MR_Next},
@ -83,8 +76,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next}, {"\"repetitions\": 2,$", MR_Next},
@ -100,8 +92,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next}, {"\"repetitions\": 2,$", MR_Next},
@ -117,8 +108,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
-ADD_CASES(TC_JSONOut,
-          {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next}, {"\"repetitions\": 2,$", MR_Next},
@ -135,9 +125,7 @@ ADD_CASES(TC_JSONOut,
{"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next}, {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
{"}", MR_Next}}); {"}", MR_Next}});
-ADD_CASES(
-    TC_CSVOut,
-    {{"^\"BM_Counters_Thousands/"
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+(" "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"}, "0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/" {"^\"BM_Counters_Thousands/"
@ -152,13 +140,13 @@ ADD_CASES(
{"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}}); {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
// VS2013 does not allow this function to be passed as a lambda argument // VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS() // to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) { void CheckThousands(Results const &e)
{
if (e.name != "BM_Counters_Thousands/repeats:2") if (e.name != "BM_Counters_Thousands/repeats:2")
return; // Do not check the aggregates! return; // Do not check the aggregates!
// check that the values are within 0.01% of the expected values // check that the values are within 0.01% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, 0.0001);
0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001); CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001); CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001); CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
@ -170,4 +158,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
// --------------------------- TEST CASES END ------------------------------ // // --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= // // ========================================================================= //
-int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
+int main(int argc, char *argv[])
+{
+    RunOutputTests(argc, argv);
+}
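As a companion to the Thousands test above, a small hypothetical example of choosing the display base for a counter: OneK::kIs1000 renders 1000000 as "1000k", while OneK::kIs1024 renders it as roughly "976.562k". The benchmark name BM_CopyDemo is illustrative only.

#include <benchmark/benchmark.h>
#include <cstring>
#include <vector>

static void BM_CopyDemo(benchmark::State &state)
{
    std::vector<char> src(1 << 20), dst(1 << 20);
    for (auto _ : state)
    {
        std::memcpy(dst.data(), src.data(), src.size());
        benchmark::DoNotOptimize(dst.data());
    }
    // Bytes processed, shown as a rate with 1024-based (KiB-style) scaling.
    state.counters["bytes_per_second"] =
        benchmark::Counter(static_cast<double>(src.size()) * state.iterations(), benchmark::Counter::kIsRate,
                           benchmark::Counter::OneK::kIs1024);
}
BENCHMARK(BM_CopyDemo);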

View File

@ -40,7 +40,8 @@
#include "gtest/internal/gtest-death-test-internal.h" #include "gtest/internal/gtest-death-test-internal.h"
namespace testing { namespace testing
{
// This flag controls the style of death tests. Valid values are "threadsafe", // This flag controls the style of death tests. Valid values are "threadsafe",
// meaning that the death test child process will re-execute the test binary // meaning that the death test child process will re-execute the test binary
@ -51,7 +52,8 @@ GTEST_DECLARE_string_(death_test_style);
#if GTEST_HAS_DEATH_TEST #if GTEST_HAS_DEATH_TEST
namespace internal { namespace internal
{
// Returns a Boolean value indicating whether the caller is currently // Returns a Boolean value indicating whether the caller is currently
// executing in the context of the death test child process. Tools such as // executing in the context of the death test child process. Tools such as
@ -165,51 +167,51 @@ GTEST_API_ bool InDeathTestChild();
// Asserts that a given statement causes the program to exit, with an // Asserts that a given statement causes the program to exit, with an
// integer exit status that satisfies predicate, and emitting error output // integer exit status that satisfies predicate, and emitting error output
// that matches regex. // that matches regex.
-# define ASSERT_EXIT(statement, predicate, regex) \
-    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
+#define ASSERT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
// Like ASSERT_EXIT, but continues on to successive tests in the // Like ASSERT_EXIT, but continues on to successive tests in the
// test suite, if any: // test suite, if any:
-# define EXPECT_EXIT(statement, predicate, regex) \
-    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
// Asserts that a given statement causes the program to exit, either by // Asserts that a given statement causes the program to exit, either by
// explicitly exiting with a nonzero exit code or being killed by a // explicitly exiting with a nonzero exit code or being killed by a
// signal, and emitting error output that matches regex. // signal, and emitting error output that matches regex.
-# define ASSERT_DEATH(statement, regex) \
-    ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+#define ASSERT_DEATH(statement, regex) ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
// Like ASSERT_DEATH, but continues on to successive tests in the // Like ASSERT_DEATH, but continues on to successive tests in the
// test suite, if any: // test suite, if any:
-# define EXPECT_DEATH(statement, regex) \
-    EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+#define EXPECT_DEATH(statement, regex) EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: // Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
// Tests that an exit code describes a normal exit with a given exit code. // Tests that an exit code describes a normal exit with a given exit code.
class GTEST_API_ ExitedWithCode { class GTEST_API_ ExitedWithCode
{
public: public:
explicit ExitedWithCode(int exit_code); explicit ExitedWithCode(int exit_code);
bool operator()(int exit_status) const; bool operator()(int exit_status) const;
private: private:
// No implementation - assignment is unsupported. // No implementation - assignment is unsupported.
void operator=(const ExitedWithCode& other); void operator=(const ExitedWithCode &other);
const int exit_code_; const int exit_code_;
}; };
# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA #if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
// Tests that an exit code describes an exit due to termination by a // Tests that an exit code describes an exit due to termination by a
// given signal. // given signal.
// GOOGLETEST_CM0006 DO NOT DELETE // GOOGLETEST_CM0006 DO NOT DELETE
class GTEST_API_ KilledBySignal { class GTEST_API_ KilledBySignal
{
public: public:
explicit KilledBySignal(int signum); explicit KilledBySignal(int signum);
bool operator()(int exit_status) const; bool operator()(int exit_status) const;
private: private:
const int signum_; const int signum_;
}; };
# endif // !GTEST_OS_WINDOWS #endif // !GTEST_OS_WINDOWS
// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. // EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
// The death testing framework causes this to have interesting semantics, // The death testing framework causes this to have interesting semantics,
@ -254,23 +256,19 @@ class GTEST_API_ KilledBySignal {
// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); // EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
// }, "death"); // }, "death");
// //
-# ifdef NDEBUG
-# define EXPECT_DEBUG_DEATH(statement, regex) \
-  GTEST_EXECUTE_STATEMENT_(statement, regex)
-# define ASSERT_DEBUG_DEATH(statement, regex) \
-  GTEST_EXECUTE_STATEMENT_(statement, regex)
-# else
-# define EXPECT_DEBUG_DEATH(statement, regex) \
-  EXPECT_DEATH(statement, regex)
-# define ASSERT_DEBUG_DEATH(statement, regex) \
-  ASSERT_DEATH(statement, regex)
-# endif  // NDEBUG for EXPECT_DEBUG_DEATH
+#ifdef NDEBUG
+#define EXPECT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)
+#define ASSERT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)
+#else
+#define EXPECT_DEBUG_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
+#define ASSERT_DEBUG_DEATH(statement, regex) ASSERT_DEATH(statement, regex)
+#endif // NDEBUG for EXPECT_DEBUG_DEATH
#endif // GTEST_HAS_DEATH_TEST #endif // GTEST_HAS_DEATH_TEST
// This macro is used for implementing macros such as // This macro is used for implementing macros such as
@ -308,17 +306,20 @@ class GTEST_API_ KilledBySignal {
// statement unconditionally returns or throws. The Message constructor at // statement unconditionally returns or throws. The Message constructor at
// the end allows the syntax of streaming additional messages into the // the end allows the syntax of streaming additional messages into the
// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. // macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH.
-# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
-    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-    if (::testing::internal::AlwaysTrue()) { \
-      GTEST_LOG_(WARNING) \
-          << "Death tests are not supported on this platform.\n" \
-          << "Statement '" #statement "' cannot be verified."; \
-    } else if (::testing::internal::AlwaysFalse()) { \
-      ::testing::internal::RE::PartialMatch(".*", (regex)); \
-      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-      terminator; \
-    } else \
-      ::testing::Message()
+#define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::AlwaysTrue()) \
+    { \
+        GTEST_LOG_(WARNING) << "Death tests are not supported on this platform.\n" \
+                            << "Statement '" #statement "' cannot be verified."; \
+    } \
+    else if (::testing::internal::AlwaysFalse()) \
+    { \
+        ::testing::internal::RE::PartialMatch(".*", (regex)); \
+        GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+        terminator; \
+    } \
+    else \
+        ::testing::Message()
// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and // EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
@ -327,15 +328,11 @@ class GTEST_API_ KilledBySignal {
// useful when you are combining death test assertions with normal test // useful when you are combining death test assertions with normal test
// assertions in one test. // assertions in one test.
#if GTEST_HAS_DEATH_TEST #if GTEST_HAS_DEATH_TEST
-# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
-    EXPECT_DEATH(statement, regex)
-# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
-    ASSERT_DEATH(statement, regex)
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) EXPECT_DEATH(statement, regex)
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) ASSERT_DEATH(statement, regex)
 #else
-# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
-    GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
-# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
-    GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return)
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return )
#endif #endif
} // namespace testing } // namespace testing
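A brief, hypothetical usage sketch of the death-test macros declared in this header; the test names and the CrashIfNull helper are illustrative only and not part of the diff.

#include "gtest/gtest.h"
#include <cstdio>
#include <cstdlib>

// Helper that aborts with a recognizable message on stderr.
void CrashIfNull(const int *p)
{
    if (p == nullptr)
    {
        std::fprintf(stderr, "null pointer\n");
        std::abort();
    }
}

TEST(DeathTestDemo, DiesOnNull)
{
    // The child process must die and its stderr must match the regex.
    EXPECT_DEATH(CrashIfNull(nullptr), "null pointer");
}

TEST(DeathTestDemo, ExitsWithCodeZero)
{
    // ASSERT_EXIT/EXPECT_EXIT take an exit-status predicate such as ExitedWithCode.
    EXPECT_EXIT(std::exit(0), ::testing::ExitedWithCode(0), "");
}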

View File

@ -55,12 +55,12 @@
#define GTEST_MAYBE_5046_ #define GTEST_MAYBE_5046_
#endif #endif
-GTEST_DISABLE_MSC_WARNINGS_PUSH_(
-    4251 GTEST_MAYBE_5046_ /* class A needs to have dll-interface to be used by
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 GTEST_MAYBE_5046_ /* class A needs to have dll-interface to be used by
clients of class B */ clients of class B */
/* Symbol involving type with internal linkage not defined */) /* Symbol involving type with internal linkage not defined */)
namespace testing { namespace testing
{
// To implement a matcher Foo for type T, define: // To implement a matcher Foo for type T, define:
// 1. a class FooMatcherImpl that implements the // 1. a class FooMatcherImpl that implements the
@ -77,52 +77,66 @@ namespace testing {
// MatchResultListener is an abstract class. Its << operator can be // MatchResultListener is an abstract class. Its << operator can be
// used by a matcher to explain why a value matches or doesn't match. // used by a matcher to explain why a value matches or doesn't match.
// //
class MatchResultListener { class MatchResultListener
{
public: public:
// Creates a listener object with the given underlying ostream. The // Creates a listener object with the given underlying ostream. The
// listener does not own the ostream, and does not dereference it // listener does not own the ostream, and does not dereference it
// in the constructor or destructor. // in the constructor or destructor.
explicit MatchResultListener(::std::ostream* os) : stream_(os) {} explicit MatchResultListener(::std::ostream *os) : stream_(os)
{
}
virtual ~MatchResultListener() = 0; // Makes this class abstract. virtual ~MatchResultListener() = 0; // Makes this class abstract.
// Streams x to the underlying ostream; does nothing if the ostream // Streams x to the underlying ostream; does nothing if the ostream
// is NULL. // is NULL.
-  template <typename T>
-  MatchResultListener& operator<<(const T& x) {
-    if (stream_ != nullptr) *stream_ << x;
-    return *this;
-  }
+    template <typename T> MatchResultListener &operator<<(const T &x)
+    {
+        if (stream_ != nullptr)
+            *stream_ << x;
+        return *this;
+    }
// Returns the underlying ostream. // Returns the underlying ostream.
::std::ostream* stream() { return stream_; } ::std::ostream *stream()
{
return stream_;
}
// Returns true if and only if the listener is interested in an explanation // Returns true if and only if the listener is interested in an explanation
// of the match result. A matcher's MatchAndExplain() method can use // of the match result. A matcher's MatchAndExplain() method can use
// this information to avoid generating the explanation when no one // this information to avoid generating the explanation when no one
// intends to hear it. // intends to hear it.
bool IsInterested() const { return stream_ != nullptr; } bool IsInterested() const
{
return stream_ != nullptr;
}
private: private:
::std::ostream* const stream_; ::std::ostream *const stream_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener); GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener);
}; };
inline MatchResultListener::~MatchResultListener() { inline MatchResultListener::~MatchResultListener()
{
} }
// An instance of a subclass of this knows how to describe itself as a // An instance of a subclass of this knows how to describe itself as a
// matcher. // matcher.
class MatcherDescriberInterface { class MatcherDescriberInterface
{
public: public:
virtual ~MatcherDescriberInterface() {} virtual ~MatcherDescriberInterface()
{
}
// Describes this matcher to an ostream. The function should print // Describes this matcher to an ostream. The function should print
// a verb phrase that describes the property a value matching this // a verb phrase that describes the property a value matching this
// matcher should have. The subject of the verb phrase is the value // matcher should have. The subject of the verb phrase is the value
// being matched. For example, the DescribeTo() method of the Gt(7) // being matched. For example, the DescribeTo() method of the Gt(7)
// matcher prints "is greater than 7". // matcher prints "is greater than 7".
virtual void DescribeTo(::std::ostream* os) const = 0; virtual void DescribeTo(::std::ostream *os) const = 0;
// Describes the negation of this matcher to an ostream. For // Describes the negation of this matcher to an ostream. For
// example, if the description of this matcher is "is greater than // example, if the description of this matcher is "is greater than
@ -130,7 +144,8 @@ class MatcherDescriberInterface {
// You are not required to override this when implementing // You are not required to override this when implementing
// MatcherInterface, but it is highly advised so that your matcher // MatcherInterface, but it is highly advised so that your matcher
// can produce good error messages. // can produce good error messages.
virtual void DescribeNegationTo(::std::ostream* os) const { virtual void DescribeNegationTo(::std::ostream *os) const
{
*os << "not ("; *os << "not (";
DescribeTo(os); DescribeTo(os);
*os << ")"; *os << ")";
@ -138,8 +153,8 @@ class MatcherDescriberInterface {
}; };
// The implementation of a matcher. // The implementation of a matcher.
template <typename T> template <typename T> class MatcherInterface : public MatcherDescriberInterface
class MatcherInterface : public MatcherDescriberInterface { {
public: public:
// Returns true if and only if the matcher matches x; also explains the // Returns true if and only if the matcher matches x; also explains the
// match result to 'listener' if necessary (see the next paragraph), in // match result to 'listener' if necessary (see the next paragraph), in
@ -172,69 +187,99 @@ class MatcherInterface : public MatcherDescriberInterface {
// can talk to 'listener' without checking its validity first. // can talk to 'listener' without checking its validity first.
// However, in order to implement dummy listeners efficiently, // However, in order to implement dummy listeners efficiently,
// listener->stream() may be NULL. // listener->stream() may be NULL.
virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0; virtual bool MatchAndExplain(T x, MatchResultListener *listener) const = 0;
// Inherits these methods from MatcherDescriberInterface: // Inherits these methods from MatcherDescriberInterface:
// virtual void DescribeTo(::std::ostream* os) const = 0; // virtual void DescribeTo(::std::ostream* os) const = 0;
// virtual void DescribeNegationTo(::std::ostream* os) const; // virtual void DescribeNegationTo(::std::ostream* os) const;
}; };
namespace internal { namespace internal
{
// Converts a MatcherInterface<T> to a MatcherInterface<const T&>. // Converts a MatcherInterface<T> to a MatcherInterface<const T&>.
-template <typename T>
-class MatcherInterfaceAdapter : public MatcherInterface<const T&> {
- public:
-  explicit MatcherInterfaceAdapter(const MatcherInterface<T>* impl)
-      : impl_(impl) {}
-  ~MatcherInterfaceAdapter() override { delete impl_; }
+template <typename T> class MatcherInterfaceAdapter : public MatcherInterface<const T &>
+{
+  public:
+    explicit MatcherInterfaceAdapter(const MatcherInterface<T> *impl) : impl_(impl)
+    {
+    }
+    ~MatcherInterfaceAdapter() override
+    {
+        delete impl_;
+    }
void DescribeTo(::std::ostream* os) const override { impl_->DescribeTo(os); } void DescribeTo(::std::ostream *os) const override
{
impl_->DescribeTo(os);
}
void DescribeNegationTo(::std::ostream* os) const override { void DescribeNegationTo(::std::ostream *os) const override
{
impl_->DescribeNegationTo(os); impl_->DescribeNegationTo(os);
} }
bool MatchAndExplain(const T& x, bool MatchAndExplain(const T &x, MatchResultListener *listener) const override
MatchResultListener* listener) const override { {
return impl_->MatchAndExplain(x, listener); return impl_->MatchAndExplain(x, listener);
} }
private: private:
const MatcherInterface<T>* const impl_; const MatcherInterface<T> *const impl_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(MatcherInterfaceAdapter); GTEST_DISALLOW_COPY_AND_ASSIGN_(MatcherInterfaceAdapter);
}; };
struct AnyEq { struct AnyEq
template <typename A, typename B> {
bool operator()(const A& a, const B& b) const { return a == b; } template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a == b;
}
}; };
struct AnyNe { struct AnyNe
template <typename A, typename B> {
bool operator()(const A& a, const B& b) const { return a != b; } template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a != b;
}
}; };
struct AnyLt { struct AnyLt
template <typename A, typename B> {
bool operator()(const A& a, const B& b) const { return a < b; } template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a < b;
}
}; };
struct AnyGt { struct AnyGt
template <typename A, typename B> {
bool operator()(const A& a, const B& b) const { return a > b; } template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a > b;
}
}; };
struct AnyLe { struct AnyLe
template <typename A, typename B> {
bool operator()(const A& a, const B& b) const { return a <= b; } template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a <= b;
}
}; };
struct AnyGe { struct AnyGe
template <typename A, typename B> {
bool operator()(const A& a, const B& b) const { return a >= b; } template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a >= b;
}
}; };
// A match result listener that ignores the explanation. // A match result listener that ignores the explanation.
class DummyMatchResultListener : public MatchResultListener { class DummyMatchResultListener : public MatchResultListener
{
public: public:
DummyMatchResultListener() : MatchResultListener(nullptr) {} DummyMatchResultListener() : MatchResultListener(nullptr)
{
}
private: private:
GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener); GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener);
@ -243,10 +288,12 @@ class DummyMatchResultListener : public MatchResultListener {
// A match result listener that forwards the explanation to a given // A match result listener that forwards the explanation to a given
// ostream. The difference between this and MatchResultListener is // ostream. The difference between this and MatchResultListener is
// that the former is concrete. // that the former is concrete.
class StreamMatchResultListener : public MatchResultListener { class StreamMatchResultListener : public MatchResultListener
{
public: public:
explicit StreamMatchResultListener(::std::ostream* os) explicit StreamMatchResultListener(::std::ostream *os) : MatchResultListener(os)
: MatchResultListener(os) {} {
}
private: private:
GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener); GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener);
@ -255,31 +302,38 @@ class StreamMatchResultListener : public MatchResultListener {
// An internal class for implementing Matcher<T>, which will derive // An internal class for implementing Matcher<T>, which will derive
// from it. We put functionalities common to all Matcher<T> // from it. We put functionalities common to all Matcher<T>
// specializations here to avoid code duplication. // specializations here to avoid code duplication.
template <typename T> template <typename T> class MatcherBase
class MatcherBase { {
public: public:
// Returns true if and only if the matcher matches x; also explains the // Returns true if and only if the matcher matches x; also explains the
// match result to 'listener'. // match result to 'listener'.
bool MatchAndExplain(const T& x, MatchResultListener* listener) const { bool MatchAndExplain(const T &x, MatchResultListener *listener) const
{
return impl_->MatchAndExplain(x, listener); return impl_->MatchAndExplain(x, listener);
} }
// Returns true if and only if this matcher matches x. // Returns true if and only if this matcher matches x.
bool Matches(const T& x) const { bool Matches(const T &x) const
{
DummyMatchResultListener dummy; DummyMatchResultListener dummy;
return MatchAndExplain(x, &dummy); return MatchAndExplain(x, &dummy);
} }
// Describes this matcher to an ostream. // Describes this matcher to an ostream.
void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); } void DescribeTo(::std::ostream *os) const
{
impl_->DescribeTo(os);
}
// Describes the negation of this matcher to an ostream. // Describes the negation of this matcher to an ostream.
void DescribeNegationTo(::std::ostream* os) const { void DescribeNegationTo(::std::ostream *os) const
{
impl_->DescribeNegationTo(os); impl_->DescribeNegationTo(os);
} }
// Explains why x matches, or doesn't match, the matcher. // Explains why x matches, or doesn't match, the matcher.
void ExplainMatchResultTo(const T& x, ::std::ostream* os) const { void ExplainMatchResultTo(const T &x, ::std::ostream *os) const
{
StreamMatchResultListener listener(os); StreamMatchResultListener listener(os);
MatchAndExplain(x, &listener); MatchAndExplain(x, &listener);
} }
@ -287,32 +341,39 @@ class MatcherBase {
// Returns the describer for this matcher object; retains ownership // Returns the describer for this matcher object; retains ownership
// of the describer, which is only guaranteed to be alive when // of the describer, which is only guaranteed to be alive when
// this matcher object is alive. // this matcher object is alive.
const MatcherDescriberInterface* GetDescriber() const { const MatcherDescriberInterface *GetDescriber() const
{
return impl_.get(); return impl_.get();
} }
protected: protected:
MatcherBase() {} MatcherBase()
{
}
// Constructs a matcher from its implementation. // Constructs a matcher from its implementation.
explicit MatcherBase(const MatcherInterface<const T&>* impl) : impl_(impl) {} explicit MatcherBase(const MatcherInterface<const T &> *impl) : impl_(impl)
{
}
template <typename U> template <typename U>
explicit MatcherBase( explicit MatcherBase(const MatcherInterface<U> *impl,
const MatcherInterface<U>* impl, typename std::enable_if<!std::is_same<U, const U &>::value>::type * = nullptr)
typename std::enable_if<!std::is_same<U, const U&>::value>::type* = : impl_(new internal::MatcherInterfaceAdapter<U>(impl))
nullptr) {
: impl_(new internal::MatcherInterfaceAdapter<U>(impl)) {} }
MatcherBase(const MatcherBase&) = default; MatcherBase(const MatcherBase &) = default;
MatcherBase& operator=(const MatcherBase&) = default; MatcherBase &operator=(const MatcherBase &) = default;
MatcherBase(MatcherBase&&) = default; MatcherBase(MatcherBase &&) = default;
MatcherBase& operator=(MatcherBase&&) = default; MatcherBase &operator=(MatcherBase &&) = default;
virtual ~MatcherBase() {} virtual ~MatcherBase()
{
}
private: private:
std::shared_ptr<const MatcherInterface<const T&>> impl_; std::shared_ptr<const MatcherInterface<const T &>> impl_;
}; };
} // namespace internal } // namespace internal
@ -321,24 +382,27 @@ class MatcherBase {
// object that can check whether a value of type T matches. The // object that can check whether a value of type T matches. The
// implementation of Matcher<T> is just a std::shared_ptr to const // implementation of Matcher<T> is just a std::shared_ptr to const
// MatcherInterface<T>. Don't inherit from Matcher! // MatcherInterface<T>. Don't inherit from Matcher!
template <typename T> template <typename T> class Matcher : public internal::MatcherBase<T>
class Matcher : public internal::MatcherBase<T> { {
public: public:
// Constructs a null matcher. Needed for storing Matcher objects in STL // Constructs a null matcher. Needed for storing Matcher objects in STL
// containers. A default-constructed matcher is not yet initialized. You // containers. A default-constructed matcher is not yet initialized. You
// cannot use it until a valid value has been assigned to it. // cannot use it until a valid value has been assigned to it.
explicit Matcher() {} // NOLINT explicit Matcher()
{
} // NOLINT
// Constructs a matcher from its implementation. // Constructs a matcher from its implementation.
explicit Matcher(const MatcherInterface<const T&>* impl) explicit Matcher(const MatcherInterface<const T &> *impl) : internal::MatcherBase<T>(impl)
: internal::MatcherBase<T>(impl) {} {
}
template <typename U> template <typename U>
explicit Matcher( explicit Matcher(const MatcherInterface<U> *impl,
const MatcherInterface<U>* impl, typename std::enable_if<!std::is_same<U, const U &>::value>::type * = nullptr)
typename std::enable_if<!std::is_same<U, const U&>::value>::type* = : internal::MatcherBase<T>(impl)
nullptr) {
: internal::MatcherBase<T>(impl) {} }
// Implicit constructor here allows people to write // Implicit constructor here allows people to write
// EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes
@ -348,40 +412,46 @@ class Matcher : public internal::MatcherBase<T> {
// The following two specializations allow the user to write str // The following two specializations allow the user to write str
// instead of Eq(str) and "foo" instead of Eq("foo") when a std::string // instead of Eq(str) and "foo" instead of Eq("foo") when a std::string
// matcher is expected. // matcher is expected.
template <> template <> class GTEST_API_ Matcher<const std::string &> : public internal::MatcherBase<const std::string &>
class GTEST_API_ Matcher<const std::string&> {
: public internal::MatcherBase<const std::string&> {
public: public:
Matcher() {} Matcher()
{
}
explicit Matcher(const MatcherInterface<const std::string&>* impl) explicit Matcher(const MatcherInterface<const std::string &> *impl)
: internal::MatcherBase<const std::string&>(impl) {} : internal::MatcherBase<const std::string &>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where // Allows the user to write str instead of Eq(str) sometimes, where
// str is a std::string object. // str is a std::string object.
Matcher(const std::string& s); // NOLINT Matcher(const std::string &s); // NOLINT
// Allows the user to write "foo" instead of Eq("foo") sometimes. // Allows the user to write "foo" instead of Eq("foo") sometimes.
Matcher(const char* s); // NOLINT Matcher(const char *s); // NOLINT
}; };
template <> template <> class GTEST_API_ Matcher<std::string> : public internal::MatcherBase<std::string>
class GTEST_API_ Matcher<std::string> {
: public internal::MatcherBase<std::string> {
public: public:
Matcher() {} Matcher()
{
}
explicit Matcher(const MatcherInterface<const std::string&>* impl) explicit Matcher(const MatcherInterface<const std::string &> *impl) : internal::MatcherBase<std::string>(impl)
: internal::MatcherBase<std::string>(impl) {} {
explicit Matcher(const MatcherInterface<std::string>* impl) }
: internal::MatcherBase<std::string>(impl) {} explicit Matcher(const MatcherInterface<std::string> *impl) : internal::MatcherBase<std::string>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where // Allows the user to write str instead of Eq(str) sometimes, where
// str is a string object. // str is a string object.
Matcher(const std::string& s); // NOLINT Matcher(const std::string &s); // NOLINT
// Allows the user to write "foo" instead of Eq("foo") sometimes. // Allows the user to write "foo" instead of Eq("foo") sometimes.
Matcher(const char* s); // NOLINT Matcher(const char *s); // NOLINT
}; };
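The specializations above are what let a bare string act as a matcher; a tiny hypothetical usage follows (the test name is illustrative only).

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <string>

TEST(MatcherDemo, StringLiteralsConvertToMatchers)
{
    std::string greeting = "hello";
    // Thanks to Matcher<const std::string &>(const char *), "hello" behaves like Eq("hello").
    EXPECT_THAT(greeting, "hello");
    EXPECT_THAT(greeting, ::testing::StrEq("hello"));
}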
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
@ -389,42 +459,50 @@ class GTEST_API_ Matcher<std::string>
// instead of Eq(str) and "foo" instead of Eq("foo") when a absl::string_view // instead of Eq(str) and "foo" instead of Eq("foo") when a absl::string_view
// matcher is expected. // matcher is expected.
template <> template <>
class GTEST_API_ Matcher<const absl::string_view&> class GTEST_API_ Matcher<const absl::string_view &> : public internal::MatcherBase<const absl::string_view &>
: public internal::MatcherBase<const absl::string_view&> { {
public: public:
Matcher() {} Matcher()
{
}
explicit Matcher(const MatcherInterface<const absl::string_view&>* impl) explicit Matcher(const MatcherInterface<const absl::string_view &> *impl)
: internal::MatcherBase<const absl::string_view&>(impl) {} : internal::MatcherBase<const absl::string_view &>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where // Allows the user to write str instead of Eq(str) sometimes, where
// str is a std::string object. // str is a std::string object.
Matcher(const std::string& s); // NOLINT Matcher(const std::string &s); // NOLINT
// Allows the user to write "foo" instead of Eq("foo") sometimes. // Allows the user to write "foo" instead of Eq("foo") sometimes.
Matcher(const char* s); // NOLINT Matcher(const char *s); // NOLINT
// Allows the user to pass absl::string_views directly. // Allows the user to pass absl::string_views directly.
Matcher(absl::string_view s); // NOLINT Matcher(absl::string_view s); // NOLINT
}; };
template <> template <> class GTEST_API_ Matcher<absl::string_view> : public internal::MatcherBase<absl::string_view>
class GTEST_API_ Matcher<absl::string_view> {
: public internal::MatcherBase<absl::string_view> {
public: public:
Matcher() {} Matcher()
{
}
explicit Matcher(const MatcherInterface<const absl::string_view&>* impl) explicit Matcher(const MatcherInterface<const absl::string_view &> *impl)
: internal::MatcherBase<absl::string_view>(impl) {} : internal::MatcherBase<absl::string_view>(impl)
explicit Matcher(const MatcherInterface<absl::string_view>* impl) {
: internal::MatcherBase<absl::string_view>(impl) {} }
explicit Matcher(const MatcherInterface<absl::string_view> *impl) : internal::MatcherBase<absl::string_view>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where // Allows the user to write str instead of Eq(str) sometimes, where
// str is a std::string object. // str is a std::string object.
Matcher(const std::string& s); // NOLINT Matcher(const std::string &s); // NOLINT
// Allows the user to write "foo" instead of Eq("foo") sometimes. // Allows the user to write "foo" instead of Eq("foo") sometimes.
Matcher(const char* s); // NOLINT Matcher(const char *s); // NOLINT
// Allows the user to pass absl::string_views directly. // Allows the user to pass absl::string_views directly.
Matcher(absl::string_view s); // NOLINT Matcher(absl::string_view s); // NOLINT
@ -432,8 +510,8 @@ class GTEST_API_ Matcher<absl::string_view>
#endif // GTEST_HAS_ABSL #endif // GTEST_HAS_ABSL
// Prints a matcher in a human-readable format. // Prints a matcher in a human-readable format.
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const Matcher<T>& matcher) {
+template <typename T> std::ostream &operator<<(std::ostream &os, const Matcher<T> &matcher)
+{
matcher.DescribeTo(&os); matcher.DescribeTo(&os);
return os; return os;
} }
@ -450,37 +528,52 @@ std::ostream& operator<<(std::ostream& os, const Matcher<T>& matcher) {
// MatchResultListener* listener) const; // MatchResultListener* listener) const;
// //
// See the definition of NotNull() for a complete example. // See the definition of NotNull() for a complete example.
template <class Impl> template <class Impl> class PolymorphicMatcher
class PolymorphicMatcher { {
public: public:
explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {} explicit PolymorphicMatcher(const Impl &an_impl) : impl_(an_impl)
{
}
// Returns a mutable reference to the underlying matcher // Returns a mutable reference to the underlying matcher
// implementation object. // implementation object.
Impl& mutable_impl() { return impl_; } Impl &mutable_impl()
{
return impl_;
}
// Returns an immutable reference to the underlying matcher // Returns an immutable reference to the underlying matcher
// implementation object. // implementation object.
const Impl& impl() const { return impl_; } const Impl &impl() const
{
return impl_;
}
template <typename T> template <typename T> operator Matcher<T>() const
operator Matcher<T>() const { {
return Matcher<T>(new MonomorphicImpl<const T&>(impl_)); return Matcher<T>(new MonomorphicImpl<const T &>(impl_));
} }
private: private:
template <typename T> template <typename T> class MonomorphicImpl : public MatcherInterface<T>
class MonomorphicImpl : public MatcherInterface<T> { {
public: public:
explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {} explicit MonomorphicImpl(const Impl &impl) : impl_(impl)
{
}
virtual void DescribeTo(::std::ostream* os) const { impl_.DescribeTo(os); } virtual void DescribeTo(::std::ostream *os) const
{
impl_.DescribeTo(os);
}
virtual void DescribeNegationTo(::std::ostream* os) const { virtual void DescribeNegationTo(::std::ostream *os) const
{
impl_.DescribeNegationTo(os); impl_.DescribeNegationTo(os);
} }
virtual bool MatchAndExplain(T x, MatchResultListener* listener) const { virtual bool MatchAndExplain(T x, MatchResultListener *listener) const
{
return impl_.MatchAndExplain(x, listener); return impl_.MatchAndExplain(x, listener);
} }
@ -497,8 +590,8 @@ class PolymorphicMatcher {
// //
// MakeMatcher may create a Matcher that accepts its argument by value, which // MakeMatcher may create a Matcher that accepts its argument by value, which
// leads to unnecessary copies & lack of support for non-copyable types. // leads to unnecessary copies & lack of support for non-copyable types.
-template <typename T>
-inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
+template <typename T> inline Matcher<T> MakeMatcher(const MatcherInterface<T> *impl)
+{
return Matcher<T>(impl); return Matcher<T>(impl);
} }
@ -509,12 +602,13 @@ inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
// MakePolymorphicMatcher(foo); // MakePolymorphicMatcher(foo);
// vs // vs
// PolymorphicMatcher<TypeOfFoo>(foo); // PolymorphicMatcher<TypeOfFoo>(foo);
-template <class Impl>
-inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl& impl) {
+template <class Impl> inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl &impl)
+{
return PolymorphicMatcher<Impl>(impl); return PolymorphicMatcher<Impl>(impl);
} }
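For context, a hypothetical monomorphic matcher built from MatcherInterface and wrapped with MakeMatcher, as described by the comments above; DivisibleBy7 is illustrative and not part of this header.

#include "gmock/gmock.h"

class DivisibleBy7Matcher : public ::testing::MatcherInterface<int>
{
  public:
    bool MatchAndExplain(int n, ::testing::MatchResultListener *listener) const override
    {
        *listener << "the remainder is " << (n % 7);
        return (n % 7) == 0;
    }
    void DescribeTo(::std::ostream *os) const override
    {
        *os << "is divisible by 7";
    }
    void DescribeNegationTo(::std::ostream *os) const override
    {
        *os << "isn't divisible by 7";
    }
};

inline ::testing::Matcher<int> DivisibleBy7()
{
    return ::testing::MakeMatcher(new DivisibleBy7Matcher);
}

// Usage: EXPECT_THAT(21, DivisibleBy7());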
namespace internal { namespace internal
{
// Implements a matcher that compares a given value with a // Implements a matcher that compares a given value with a
// pre-supplied value using one of the ==, <=, <, etc, operators. The // pre-supplied value using one of the ==, <=, <, etc, operators. The
// two values being compared don't have to have the same type. // two values being compared don't have to have the same type.
@ -525,34 +619,44 @@ namespace internal {
// //
// The following template definition assumes that the Rhs parameter is // The following template definition assumes that the Rhs parameter is
// a "bare" type (i.e. neither 'const T' nor 'T&'). // a "bare" type (i.e. neither 'const T' nor 'T&').
template <typename D, typename Rhs, typename Op> template <typename D, typename Rhs, typename Op> class ComparisonBase
class ComparisonBase { {
public: public:
explicit ComparisonBase(const Rhs& rhs) : rhs_(rhs) {} explicit ComparisonBase(const Rhs &rhs) : rhs_(rhs)
template <typename Lhs> {
operator Matcher<Lhs>() const { }
return Matcher<Lhs>(new Impl<const Lhs&>(rhs_)); template <typename Lhs> operator Matcher<Lhs>() const
{
return Matcher<Lhs>(new Impl<const Lhs &>(rhs_));
} }
private: private:
template <typename T> template <typename T> static const T &Unwrap(const T &v)
static const T& Unwrap(const T& v) { return v; } {
template <typename T> return v;
static const T& Unwrap(std::reference_wrapper<T> v) { return v; } }
template <typename T> static const T &Unwrap(std::reference_wrapper<T> v)
{
return v;
}
template <typename Lhs, typename = Rhs> template <typename Lhs, typename = Rhs> class Impl : public MatcherInterface<Lhs>
class Impl : public MatcherInterface<Lhs> { {
public: public:
explicit Impl(const Rhs& rhs) : rhs_(rhs) {} explicit Impl(const Rhs &rhs) : rhs_(rhs)
bool MatchAndExplain(Lhs lhs, {
MatchResultListener* /* listener */) const override { }
bool MatchAndExplain(Lhs lhs, MatchResultListener * /* listener */) const override
{
return Op()(lhs, Unwrap(rhs_)); return Op()(lhs, Unwrap(rhs_));
} }
void DescribeTo(::std::ostream* os) const override { void DescribeTo(::std::ostream *os) const override
{
*os << D::Desc() << " "; *os << D::Desc() << " ";
UniversalPrint(Unwrap(rhs_), os); UniversalPrint(Unwrap(rhs_), os);
} }
void DescribeNegationTo(::std::ostream* os) const override { void DescribeNegationTo(::std::ostream *os) const override
{
*os << D::NegatedDesc() << " "; *os << D::NegatedDesc() << " ";
UniversalPrint(Unwrap(rhs_), os); UniversalPrint(Unwrap(rhs_), os);
} }
@ -563,66 +667,110 @@ class ComparisonBase {
Rhs rhs_; Rhs rhs_;
}; };
-template <typename Rhs>
-class EqMatcher : public ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq> {
- public:
-  explicit EqMatcher(const Rhs& rhs)
-      : ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>(rhs) { }
-  static const char* Desc() { return "is equal to"; }
-  static const char* NegatedDesc() { return "isn't equal to"; }
-};
+template <typename Rhs> class EqMatcher : public ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>
+{
+  public:
+    explicit EqMatcher(const Rhs &rhs) : ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>(rhs)
+    {
+    }
+    static const char *Desc()
+    {
+        return "is equal to";
+    }
+    static const char *NegatedDesc()
+    {
+        return "isn't equal to";
+    }
+};
template <typename Rhs> template <typename Rhs> class NeMatcher : public ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>
class NeMatcher : public ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe> { {
public: public:
explicit NeMatcher(const Rhs& rhs) explicit NeMatcher(const Rhs &rhs) : ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>(rhs)
: ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>(rhs) { } {
static const char* Desc() { return "isn't equal to"; } }
static const char* NegatedDesc() { return "is equal to"; } static const char *Desc()
{
return "isn't equal to";
}
static const char *NegatedDesc()
{
return "is equal to";
}
}; };
template <typename Rhs> template <typename Rhs> class LtMatcher : public ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>
class LtMatcher : public ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt> { {
public: public:
explicit LtMatcher(const Rhs& rhs) explicit LtMatcher(const Rhs &rhs) : ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>(rhs)
: ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>(rhs) { } {
static const char* Desc() { return "is <"; } }
static const char* NegatedDesc() { return "isn't <"; } static const char *Desc()
{
return "is <";
}
static const char *NegatedDesc()
{
return "isn't <";
}
}; };
template <typename Rhs> template <typename Rhs> class GtMatcher : public ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>
class GtMatcher : public ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt> { {
public: public:
explicit GtMatcher(const Rhs& rhs) explicit GtMatcher(const Rhs &rhs) : ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>(rhs)
: ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>(rhs) { } {
static const char* Desc() { return "is >"; } }
static const char* NegatedDesc() { return "isn't >"; } static const char *Desc()
{
return "is >";
}
static const char *NegatedDesc()
{
return "isn't >";
}
}; };
template <typename Rhs> template <typename Rhs> class LeMatcher : public ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>
class LeMatcher : public ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe> { {
public: public:
explicit LeMatcher(const Rhs& rhs) explicit LeMatcher(const Rhs &rhs) : ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>(rhs)
: ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>(rhs) { } {
static const char* Desc() { return "is <="; } }
static const char* NegatedDesc() { return "isn't <="; } static const char *Desc()
{
return "is <=";
}
static const char *NegatedDesc()
{
return "isn't <=";
}
}; };
template <typename Rhs> template <typename Rhs> class GeMatcher : public ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>
class GeMatcher : public ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe> { {
public: public:
explicit GeMatcher(const Rhs& rhs) explicit GeMatcher(const Rhs &rhs) : ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>(rhs)
: ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>(rhs) { } {
static const char* Desc() { return "is >="; } }
static const char* NegatedDesc() { return "isn't >="; } static const char *Desc()
{
return "is >=";
}
static const char *NegatedDesc()
{
return "isn't >=";
}
}; };
// Implements polymorphic matchers MatchesRegex(regex) and // Implements polymorphic matchers MatchesRegex(regex) and
// ContainsRegex(regex), which can be used as a Matcher<T> as long as // ContainsRegex(regex), which can be used as a Matcher<T> as long as
// T can be converted to a string. // T can be converted to a string.
class MatchesRegexMatcher { class MatchesRegexMatcher
{
public: public:
MatchesRegexMatcher(const RE* regex, bool full_match) MatchesRegexMatcher(const RE *regex, bool full_match) : regex_(regex), full_match_(full_match)
: regex_(regex), full_match_(full_match) {} {
}
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
bool MatchAndExplain(const absl::string_view& s, bool MatchAndExplain(const absl::string_view &s, MatchResultListener *listener) const
MatchResultListener* listener) const { {
return MatchAndExplain(std::string(s), listener); return MatchAndExplain(std::string(s), listener);
} }
#endif // GTEST_HAS_ABSL #endif // GTEST_HAS_ABSL
@ -632,8 +780,8 @@ class MatchesRegexMatcher {
// char* // char*
// const wchar_t* // const wchar_t*
// wchar_t* // wchar_t*
template <typename CharType> template <typename CharType> bool MatchAndExplain(CharType *s, MatchResultListener *listener) const
bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { {
return s != nullptr && MatchAndExplain(std::string(s), listener); return s != nullptr && MatchAndExplain(std::string(s), listener);
} }
@ -642,21 +790,21 @@ class MatchesRegexMatcher {
// This is a template, not just a plain function with const std::string&, // This is a template, not just a plain function with const std::string&,
// because absl::string_view has some interfering non-explicit constructors. // because absl::string_view has some interfering non-explicit constructors.
template <class MatcheeStringType> template <class MatcheeStringType>
bool MatchAndExplain(const MatcheeStringType& s, bool MatchAndExplain(const MatcheeStringType &s, MatchResultListener * /* listener */) const
MatchResultListener* /* listener */) const { {
const std::string& s2(s); const std::string &s2(s);
return full_match_ ? RE::FullMatch(s2, *regex_) return full_match_ ? RE::FullMatch(s2, *regex_) : RE::PartialMatch(s2, *regex_);
: RE::PartialMatch(s2, *regex_);
} }
void DescribeTo(::std::ostream* os) const { void DescribeTo(::std::ostream *os) const
{
*os << (full_match_ ? "matches" : "contains") << " regular expression "; *os << (full_match_ ? "matches" : "contains") << " regular expression ";
UniversalPrinter<std::string>::Print(regex_->pattern(), os); UniversalPrinter<std::string>::Print(regex_->pattern(), os);
} }
void DescribeNegationTo(::std::ostream* os) const { void DescribeNegationTo(::std::ostream *os) const
*os << "doesn't " << (full_match_ ? "match" : "contain") {
<< " regular expression "; *os << "doesn't " << (full_match_ ? "match" : "contain") << " regular expression ";
UniversalPrinter<std::string>::Print(regex_->pattern(), os); UniversalPrinter<std::string>::Print(regex_->pattern(), os);
} }
@ -668,36 +816,40 @@ class MatchesRegexMatcher {
// Matches a string that fully matches regular expression 'regex'. // Matches a string that fully matches regular expression 'regex'.
// The matcher takes ownership of 'regex'. // The matcher takes ownership of 'regex'.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex( inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(const internal::RE *regex)
const internal::RE* regex) { {
return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true)); return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true));
} }
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex( inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(const std::string &regex)
const std::string& regex) { {
return MatchesRegex(new internal::RE(regex)); return MatchesRegex(new internal::RE(regex));
} }
// Matches a string that contains regular expression 'regex'. // Matches a string that contains regular expression 'regex'.
// The matcher takes ownership of 'regex'. // The matcher takes ownership of 'regex'.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex( inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(const internal::RE *regex)
const internal::RE* regex) { {
return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false)); return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false));
} }
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex( inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(const std::string &regex)
const std::string& regex) { {
return ContainsRegex(new internal::RE(regex)); return ContainsRegex(new internal::RE(regex));
} }
// Creates a polymorphic matcher that matches anything equal to x. // Creates a polymorphic matcher that matches anything equal to x.
// Note: if the parameter of Eq() were declared as const T&, Eq("foo") // Note: if the parameter of Eq() were declared as const T&, Eq("foo")
// wouldn't compile. // wouldn't compile.
template <typename T> template <typename T> inline internal::EqMatcher<T> Eq(T x)
inline internal::EqMatcher<T> Eq(T x) { return internal::EqMatcher<T>(x); } {
return internal::EqMatcher<T>(x);
}
// Constructs a Matcher<T> from a 'value' of type T. The constructed // Constructs a Matcher<T> from a 'value' of type T. The constructed
// matcher matches any value that's equal to 'value'. // matcher matches any value that's equal to 'value'.
template <typename T> template <typename T> Matcher<T>::Matcher(T value)
Matcher<T>::Matcher(T value) { *this = Eq(value); } {
*this = Eq(value);
}
// Creates a monomorphic matcher that matches anything with type Lhs // Creates a monomorphic matcher that matches anything with type Lhs
// and equal to rhs. A user may need to use this instead of Eq(...) // and equal to rhs. A user may need to use this instead of Eq(...)
@ -711,36 +863,38 @@ Matcher<T>::Matcher(T value) { *this = Eq(value); }
// it yet as those are used much less than Eq() in practice. A user // it yet as those are used much less than Eq() in practice. A user
// can always write Matcher<T>(Lt(5)) to be explicit about the type, // can always write Matcher<T>(Lt(5)) to be explicit about the type,
// for example. // for example.
template <typename Lhs, typename Rhs> template <typename Lhs, typename Rhs> inline Matcher<Lhs> TypedEq(const Rhs &rhs)
inline Matcher<Lhs> TypedEq(const Rhs& rhs) { return Eq(rhs); } {
return Eq(rhs);
}
// Creates a polymorphic matcher that matches anything >= x. // Creates a polymorphic matcher that matches anything >= x.
template <typename Rhs> template <typename Rhs> inline internal::GeMatcher<Rhs> Ge(Rhs x)
inline internal::GeMatcher<Rhs> Ge(Rhs x) { {
return internal::GeMatcher<Rhs>(x); return internal::GeMatcher<Rhs>(x);
} }
// Creates a polymorphic matcher that matches anything > x. // Creates a polymorphic matcher that matches anything > x.
template <typename Rhs> template <typename Rhs> inline internal::GtMatcher<Rhs> Gt(Rhs x)
inline internal::GtMatcher<Rhs> Gt(Rhs x) { {
return internal::GtMatcher<Rhs>(x); return internal::GtMatcher<Rhs>(x);
} }
// Creates a polymorphic matcher that matches anything <= x. // Creates a polymorphic matcher that matches anything <= x.
template <typename Rhs> template <typename Rhs> inline internal::LeMatcher<Rhs> Le(Rhs x)
inline internal::LeMatcher<Rhs> Le(Rhs x) { {
return internal::LeMatcher<Rhs>(x); return internal::LeMatcher<Rhs>(x);
} }
// Creates a polymorphic matcher that matches anything < x. // Creates a polymorphic matcher that matches anything < x.
template <typename Rhs> template <typename Rhs> inline internal::LtMatcher<Rhs> Lt(Rhs x)
inline internal::LtMatcher<Rhs> Lt(Rhs x) { {
return internal::LtMatcher<Rhs>(x); return internal::LtMatcher<Rhs>(x);
} }
// Creates a polymorphic matcher that matches anything != x. // Creates a polymorphic matcher that matches anything != x.
template <typename Rhs> template <typename Rhs> inline internal::NeMatcher<Rhs> Ne(Rhs x)
inline internal::NeMatcher<Rhs> Ne(Rhs x) { {
return internal::NeMatcher<Rhs>(x); return internal::NeMatcher<Rhs>(x);
} }
} // namespace testing } // namespace testing
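// --- Illustrative sketch (assumed usage, not from the sources above) ---
// Typical use of the comparison matcher factories Eq/Ne/Lt/Gt/Le/Ge and
// TypedEq declared above. Test name and values are hypothetical; assumes
// "gtest/gtest.h" and "gmock/gmock.h" (for EXPECT_THAT and AllOf) are
// included.
TEST(ComparisonMatcherSketch, BasicComparisons) {
  using ::testing::AllOf;
  using ::testing::Ge;
  using ::testing::Lt;
  using ::testing::Ne;

  EXPECT_THAT(5, Ge(5));                 // 5 >= 5
  EXPECT_THAT(4, AllOf(Ge(1), Lt(10)));  // 1 <= 4 < 10
  EXPECT_THAT(7, Ne(8));                 // 7 != 8

  // TypedEq<T>() pins the expected value to a specific argument type.
  ::testing::Matcher<int> exactly_three = ::testing::TypedEq<int>(3);
  EXPECT_TRUE(exactly_three.Matches(3));
}
// --- end of sketch ---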
View File
@ -52,14 +52,14 @@
#include "gtest/internal/gtest-port.h" #include "gtest/internal/gtest-port.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
/* class A needs to have dll-interface to be used by clients of class B */)
// Ensures that there is at least one operator<< in the global namespace. // Ensures that there is at least one operator<< in the global namespace.
// See Message& operator<<(...) below for why. // See Message& operator<<(...) below for why.
void operator<<(const testing::internal::Secret&, int); void operator<<(const testing::internal::Secret &, int);
namespace testing { namespace testing
{
// The Message class works like an ostream repeater. // The Message class works like an ostream repeater.
// //
@ -87,29 +87,32 @@ namespace testing {
// latter (it causes an access violation if you do). The Message // latter (it causes an access violation if you do). The Message
// class hides this difference by treating a NULL char pointer as // class hides this difference by treating a NULL char pointer as
// "(null)". // "(null)".
class GTEST_API_ Message { class GTEST_API_ Message
{
private: private:
// The type of basic IO manipulators (endl, ends, and flush) for // The type of basic IO manipulators (endl, ends, and flush) for
// narrow streams. // narrow streams.
typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); typedef std::ostream &(*BasicNarrowIoManip)(std::ostream &);
public: public:
// Constructs an empty Message. // Constructs an empty Message.
Message(); Message();
// Copy constructor. // Copy constructor.
Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT Message(const Message &msg) : ss_(new ::std::stringstream)
{ // NOLINT
*ss_ << msg.GetString(); *ss_ << msg.GetString();
} }
// Constructs a Message from a C-string. // Constructs a Message from a C-string.
explicit Message(const char* str) : ss_(new ::std::stringstream) { explicit Message(const char *str) : ss_(new ::std::stringstream)
{
*ss_ << str; *ss_ << str;
} }
// Streams a non-pointer value to this object. // Streams a non-pointer value to this object.
template <typename T> template <typename T> inline Message &operator<<(const T &val)
inline Message& operator <<(const T& val) { {
// Some libraries overload << for STL containers. These // Some libraries overload << for STL containers. These
// overloads are defined in the global namespace instead of ::std. // overloads are defined in the global namespace instead of ::std.
// //
@ -124,7 +127,7 @@ class GTEST_API_ Message {
// from the global namespace. With this using declaration, // from the global namespace. With this using declaration,
// overloads of << defined in the global namespace and those // overloads of << defined in the global namespace and those
// visible via Koenig lookup are both exposed in this function. // visible via Koenig lookup are both exposed in this function.
using ::operator <<; using ::operator<<;
*ss_ << val; *ss_ << val;
return *this; return *this;
} }
@ -142,11 +145,14 @@ class GTEST_API_ Message {
// may get "0", "(nil)", "(null)", or an access violation. To // may get "0", "(nil)", "(null)", or an access violation. To
// ensure consistent result across compilers, we always treat NULL // ensure consistent result across compilers, we always treat NULL
// as "(null)". // as "(null)".
template <typename T> template <typename T> inline Message &operator<<(T *const &pointer)
inline Message& operator <<(T* const& pointer) { // NOLINT { // NOLINT
if (pointer == nullptr) { if (pointer == nullptr)
{
*ss_ << "(null)"; *ss_ << "(null)";
} else { }
else
{
*ss_ << pointer; *ss_ << pointer;
} }
return *this; return *this;
@ -158,25 +164,27 @@ class GTEST_API_ Message {
// templatized version above. Without this definition, streaming // templatized version above. Without this definition, streaming
// endl or other basic IO manipulators to Message will confuse the // endl or other basic IO manipulators to Message will confuse the
// compiler. // compiler.
Message& operator <<(BasicNarrowIoManip val) { Message &operator<<(BasicNarrowIoManip val)
{
*ss_ << val; *ss_ << val;
return *this; return *this;
} }
// Instead of 1/0, we want to see true/false for bool values. // Instead of 1/0, we want to see true/false for bool values.
Message& operator <<(bool b) { Message &operator<<(bool b)
{
return *this << (b ? "true" : "false"); return *this << (b ? "true" : "false");
} }
// These two overloads allow streaming a wide C string to a Message // These two overloads allow streaming a wide C string to a Message
// using the UTF-8 encoding. // using the UTF-8 encoding.
Message& operator <<(const wchar_t* wide_c_str); Message &operator<<(const wchar_t *wide_c_str);
Message& operator <<(wchar_t* wide_c_str); Message &operator<<(wchar_t *wide_c_str);
#if GTEST_HAS_STD_WSTRING #if GTEST_HAS_STD_WSTRING
// Converts the given wide string to a narrow string using the UTF-8 // Converts the given wide string to a narrow string using the UTF-8
// encoding, and streams the result to this Message object. // encoding, and streams the result to this Message object.
Message& operator <<(const ::std::wstring& wstr); Message &operator<<(const ::std::wstring &wstr);
#endif // GTEST_HAS_STD_WSTRING #endif // GTEST_HAS_STD_WSTRING
// Gets the text streamed to this object so far as an std::string. // Gets the text streamed to this object so far as an std::string.
@ -187,26 +195,28 @@ class GTEST_API_ Message {
private: private:
// We'll hold the text streamed to this object here. // We'll hold the text streamed to this object here.
const std::unique_ptr< ::std::stringstream> ss_; const std::unique_ptr<::std::stringstream> ss_;
// We declare (but don't implement) this to prevent the compiler // We declare (but don't implement) this to prevent the compiler
// from implementing the assignment operator. // from implementing the assignment operator.
void operator=(const Message&); void operator=(const Message &);
}; };
// Streams a Message to an ostream. // Streams a Message to an ostream.
inline std::ostream& operator <<(std::ostream& os, const Message& sb) { inline std::ostream &operator<<(std::ostream &os, const Message &sb)
{
return os << sb.GetString(); return os << sb.GetString();
} }
namespace internal { namespace internal
{
// Converts a streamable value to an std::string. A NULL pointer is // Converts a streamable value to an std::string. A NULL pointer is
// converted to "(null)". When the input value is a ::string, // converted to "(null)". When the input value is a ::string,
// ::std::string, ::wstring, or ::std::wstring object, each NUL // ::std::string, ::wstring, or ::std::wstring object, each NUL
// character in it is replaced with "\\0". // character in it is replaced with "\\0".
template <typename T> template <typename T> std::string StreamableToString(const T &streamable)
std::string StreamableToString(const T& streamable) { {
return (Message() << streamable).GetString(); return (Message() << streamable).GetString();
} }
View File
@ -36,7 +36,6 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ #ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ #define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
// Value-parameterized tests allow you to test your code with different // Value-parameterized tests allow you to test your code with different
// parameters without writing multiple copies of the same test. // parameters without writing multiple copies of the same test.
// //
@ -181,7 +180,8 @@ TEST_P(DerivedTest, DoesBlah) {
#include "gtest/internal/gtest-param-util.h" #include "gtest/internal/gtest-param-util.h"
#include "gtest/internal/gtest-port.h" #include "gtest/internal/gtest-port.h"
namespace testing { namespace testing
{
// Functions producing parameter generators. // Functions producing parameter generators.
// //
@ -225,14 +225,13 @@ namespace testing {
// * Condition start < end must be satisfied in order for resulting sequences // * Condition start < end must be satisfied in order for resulting sequences
// to contain any elements. // to contain any elements.
// //
template <typename T, typename IncrementT> template <typename T, typename IncrementT> internal::ParamGenerator<T> Range(T start, T end, IncrementT step)
internal::ParamGenerator<T> Range(T start, T end, IncrementT step) { {
return internal::ParamGenerator<T>( return internal::ParamGenerator<T>(new internal::RangeGenerator<T, IncrementT>(start, end, step));
new internal::RangeGenerator<T, IncrementT>(start, end, step));
} }
template <typename T> template <typename T> internal::ParamGenerator<T> Range(T start, T end)
internal::ParamGenerator<T> Range(T start, T end) { {
return Range(start, end, 1); return Range(start, end, 1);
} }
@ -292,22 +291,20 @@ internal::ParamGenerator<T> Range(T start, T end) {
// ValuesIn(l.begin(), l.end())); // ValuesIn(l.begin(), l.end()));
// //
template <typename ForwardIterator> template <typename ForwardIterator>
internal::ParamGenerator< internal::ParamGenerator<typename std::iterator_traits<ForwardIterator>::value_type> ValuesIn(ForwardIterator begin,
typename std::iterator_traits<ForwardIterator>::value_type> ForwardIterator end)
ValuesIn(ForwardIterator begin, ForwardIterator end) { {
typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType; typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType;
return internal::ParamGenerator<ParamType>( return internal::ParamGenerator<ParamType>(new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
} }
template <typename T, size_t N> template <typename T, size_t N> internal::ParamGenerator<T> ValuesIn(const T (&array)[N])
internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) { {
return ValuesIn(array, array + N); return ValuesIn(array, array + N);
} }
template <class Container> template <class Container> internal::ParamGenerator<typename Container::value_type> ValuesIn(const Container &container)
internal::ParamGenerator<typename Container::value_type> ValuesIn( {
const Container& container) {
return ValuesIn(container.begin(), container.end()); return ValuesIn(container.begin(), container.end());
} }
@ -331,8 +328,8 @@ internal::ParamGenerator<typename Container::value_type> ValuesIn(
// INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5)); // INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
// //
// //
template <typename... T> template <typename... T> internal::ValueArray<T...> Values(T... v)
internal::ValueArray<T...> Values(T... v) { {
return internal::ValueArray<T...>(std::move(v)...); return internal::ValueArray<T...>(std::move(v)...);
} }
@ -356,7 +353,8 @@ internal::ValueArray<T...> Values(T... v) {
// } // }
// INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool()); // INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool());
// //
inline internal::ParamGenerator<bool> Bool() { inline internal::ParamGenerator<bool> Bool()
{
return Values(false, true); return Values(false, true);
} }
@ -406,37 +404,36 @@ inline internal::ParamGenerator<bool> Bool() {
// INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest, // INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest,
// Combine(Bool(), Bool())); // Combine(Bool(), Bool()));
// //
template <typename... Generator> template <typename... Generator> internal::CartesianProductHolder<Generator...> Combine(const Generator &...g)
internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) { {
return internal::CartesianProductHolder<Generator...>(g...); return internal::CartesianProductHolder<Generator...>(g...);
} }
#define TEST_P(test_suite_name, test_name) \ #define TEST_P(test_suite_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \ class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) : public test_suite_name \
: public test_suite_name { \ { \
public: \ public: \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {} \ GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() \
{ \
} \
virtual void TestBody(); \ virtual void TestBody(); \
\ \
private: \ private: \
static int AddToRegistry() { \ static int AddToRegistry() \
{ \
::testing::UnitTest::GetInstance() \ ::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \ ->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>( \ .GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \ ::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestPattern( \ ->AddTestPattern( \
GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \ GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \
new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_( \ new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)>()); \
test_suite_name, test_name)>()); \
return 0; \ return 0; \
} \ } \
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \ static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)); \
test_name)); \
}; \ }; \
int GTEST_TEST_CLASS_NAME_(test_suite_name, \ int GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::gtest_registering_dummy_ = \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \ GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody() void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
@ -459,42 +456,35 @@ internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {
#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \ #define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \
static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \ static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
gtest_##prefix##test_suite_name##_EvalGenerator_() { \ gtest_##prefix##test_suite_name##_EvalGenerator_() \
{ \
return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \ return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \
} \ } \
static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \ static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \
const ::testing::TestParamInfo<test_suite_name::ParamType>& info) { \ const ::testing::TestParamInfo<test_suite_name::ParamType> &info) \
if (::testing::internal::AlwaysFalse()) { \ { \
if (::testing::internal::AlwaysFalse()) \
{ \
::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \ ::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, \ __VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))); \
::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
DUMMY_PARAM_))); \
auto t = std::make_tuple(__VA_ARGS__); \ auto t = std::make_tuple(__VA_ARGS__); \
static_assert(std::tuple_size<decltype(t)>::value <= 2, \ static_assert(std::tuple_size<decltype(t)>::value <= 2, "Too Many Args!"); \
"Too Many Args!"); \
} \ } \
return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \ return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, \ __VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))))(info); \
::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
DUMMY_PARAM_))))(info); \
} \ } \
static int gtest_##prefix##test_suite_name##_dummy_ \ static int gtest_##prefix##test_suite_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_ATTRIBUTE_UNUSED_ = \
::testing::UnitTest::GetInstance() \ ::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \ ->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>( \ .GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \ ::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestSuiteInstantiation( \ ->AddTestSuiteInstantiation(#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \
#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \ &gtest_##prefix##test_suite_name##_EvalGenerateName_, __FILE__, __LINE__)
&gtest_##prefix##test_suite_name##_EvalGenerateName_, \
__FILE__, __LINE__)
// Legacy API is deprecated but still available // Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TEST_CASE_P \ #define INSTANTIATE_TEST_CASE_P \
static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), \ static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), ""); \
""); \
INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
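// --- Illustrative sketch (assumed usage, not from the sources above) ---
// The TEST_P / INSTANTIATE_TEST_SUITE_P macros defined above combined with
// the Range(), Values() and ValuesIn() generators. Fixture, suite, and
// container names are hypothetical; assumes "gtest/gtest.h" and <vector> are
// included.
class PositiveIntSketch : public ::testing::TestWithParam<int> {};

TEST_P(PositiveIntSketch, ParamIsPositive) { EXPECT_GT(GetParam(), 0); }

// Runs the test body once for each of 1, 3, 5, 7, 9.
INSTANTIATE_TEST_SUITE_P(OddRange, PositiveIntSketch,
                         ::testing::Range(1, 10, 2));

// Explicitly listed values.
INSTANTIATE_TEST_SUITE_P(PowersOfTwo, PositiveIntSketch,
                         ::testing::Values(2, 4, 8));

// Every element of an existing container.
static const std::vector<int> kPrimesSketch = {2, 3, 5, 7};
INSTANTIATE_TEST_SUITE_P(Primes, PositiveIntSketch,
                         ::testing::ValuesIn(kPrimesSketch));
// --- end of sketch ---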
View File
@ -27,7 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Google Test - The Google C++ Testing and Mocking Framework // Google Test - The Google C++ Testing and Mocking Framework
// //
// This file implements a universal value printer that can print a // This file implements a universal value printer that can print a
@ -100,6 +99,8 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ #ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ #define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-port.h"
#include <functional> #include <functional>
#include <ostream> // NOLINT #include <ostream> // NOLINT
#include <sstream> #include <sstream>
@ -108,8 +109,6 @@
#include <type_traits> #include <type_traits>
#include <utility> #include <utility>
#include <vector> #include <vector>
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-port.h"
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"
@ -117,21 +116,22 @@
#include "absl/types/variant.h" #include "absl/types/variant.h"
#endif // GTEST_HAS_ABSL #endif // GTEST_HAS_ABSL
namespace testing { namespace testing
{
// Definitions in the 'internal' and 'internal2' name spaces are // Definitions in the 'internal' and 'internal2' name spaces are
// subject to change without notice. DO NOT USE THEM IN USER CODE! // subject to change without notice. DO NOT USE THEM IN USER CODE!
namespace internal2 { namespace internal2
{
// Prints the given number of bytes in the given object to the given // Prints the given number of bytes in the given object to the given
// ostream. // ostream.
GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes, GTEST_API_ void PrintBytesInObjectTo(const unsigned char *obj_bytes, size_t count, ::std::ostream *os);
size_t count,
::std::ostream* os);
// For selecting which printer to use when a given type has neither << // For selecting which printer to use when a given type has neither <<
// nor PrintTo(). // nor PrintTo().
enum TypeKind { enum TypeKind
{
kProtobuf, // a protobuf type kProtobuf, // a protobuf type
kConvertibleToInteger, // a type implicitly convertible to BiggestInt kConvertibleToInteger, // a type implicitly convertible to BiggestInt
// (e.g. a named or unnamed enum type) // (e.g. a named or unnamed enum type)
@ -146,14 +146,13 @@ enum TypeKind {
// by the universal printer to print a value of type T when neither // by the universal printer to print a value of type T when neither
// operator<< nor PrintTo() is defined for T, where kTypeKind is the // operator<< nor PrintTo() is defined for T, where kTypeKind is the
// "kind" of T as defined by enum TypeKind. // "kind" of T as defined by enum TypeKind.
template <typename T, TypeKind kTypeKind> template <typename T, TypeKind kTypeKind> class TypeWithoutFormatter
class TypeWithoutFormatter { {
public: public:
// This default version is called when kTypeKind is kOtherType. // This default version is called when kTypeKind is kOtherType.
static void PrintValue(const T& value, ::std::ostream* os) { static void PrintValue(const T &value, ::std::ostream *os)
PrintBytesInObjectTo( {
static_cast<const unsigned char*>( PrintBytesInObjectTo(static_cast<const unsigned char *>(reinterpret_cast<const void *>(std::addressof(value))),
reinterpret_cast<const void*>(std::addressof(value))),
sizeof(value), os); sizeof(value), os);
} }
}; };
@ -163,20 +162,22 @@ class TypeWithoutFormatter {
// DebugString() for better readability. // DebugString() for better readability.
const size_t kProtobufOneLinerMaxLength = 50; const size_t kProtobufOneLinerMaxLength = 50;
template <typename T> template <typename T> class TypeWithoutFormatter<T, kProtobuf>
class TypeWithoutFormatter<T, kProtobuf> { {
public: public:
static void PrintValue(const T& value, ::std::ostream* os) { static void PrintValue(const T &value, ::std::ostream *os)
{
std::string pretty_str = value.ShortDebugString(); std::string pretty_str = value.ShortDebugString();
if (pretty_str.length() > kProtobufOneLinerMaxLength) { if (pretty_str.length() > kProtobufOneLinerMaxLength)
{
pretty_str = "\n" + value.DebugString(); pretty_str = "\n" + value.DebugString();
} }
*os << ("<" + pretty_str + ">"); *os << ("<" + pretty_str + ">");
} }
}; };
template <typename T> template <typename T> class TypeWithoutFormatter<T, kConvertibleToInteger>
class TypeWithoutFormatter<T, kConvertibleToInteger> { {
public: public:
// Since T has no << operator or PrintTo() but can be implicitly // Since T has no << operator or PrintTo() but can be implicitly
// converted to BiggestInt, we print it as a BiggestInt. // converted to BiggestInt, we print it as a BiggestInt.
@ -185,22 +186,23 @@ class TypeWithoutFormatter<T, kConvertibleToInteger> {
// case printing it as an integer is the desired behavior. In case // case printing it as an integer is the desired behavior. In case
// T is not an enum, printing it as an integer is the best we can do // T is not an enum, printing it as an integer is the best we can do
// given that it has no user-defined printer. // given that it has no user-defined printer.
static void PrintValue(const T& value, ::std::ostream* os) { static void PrintValue(const T &value, ::std::ostream *os)
{
const internal::BiggestInt kBigInt = value; const internal::BiggestInt kBigInt = value;
*os << kBigInt; *os << kBigInt;
} }
}; };
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
template <typename T> template <typename T> class TypeWithoutFormatter<T, kConvertibleToStringView>
class TypeWithoutFormatter<T, kConvertibleToStringView> { {
public: public:
// Since T has neither operator<< nor PrintTo() but can be implicitly // Since T has neither operator<< nor PrintTo() but can be implicitly
// converted to absl::string_view, we print it as a absl::string_view. // converted to absl::string_view, we print it as a absl::string_view.
// //
// Note: the implementation is further below, as it depends on // Note: the implementation is further below, as it depends on
// internal::PrintTo symbol which is defined later in the file. // internal::PrintTo symbol which is defined later in the file.
static void PrintValue(const T& value, ::std::ostream* os); static void PrintValue(const T &value, ::std::ostream *os);
}; };
#endif #endif
@ -229,18 +231,13 @@ class TypeWithoutFormatter<T, kConvertibleToStringView> {
// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more // operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more
// specific. // specific.
template <typename Char, typename CharTraits, typename T> template <typename Char, typename CharTraits, typename T>
::std::basic_ostream<Char, CharTraits>& operator<<( ::std::basic_ostream<Char, CharTraits> &operator<<(::std::basic_ostream<Char, CharTraits> &os, const T &x)
::std::basic_ostream<Char, CharTraits>& os, const T& x) { {
TypeWithoutFormatter<T, (internal::IsAProtocolMessage<T>::value TypeWithoutFormatter<T, (internal::IsAProtocolMessage<T>::value ? kProtobuf
? kProtobuf : std::is_convertible<const T &, internal::BiggestInt>::value ? kConvertibleToInteger
: std::is_convertible<
const T&, internal::BiggestInt>::value
? kConvertibleToInteger
: :
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
std::is_convertible< std::is_convertible<const T &, absl::string_view>::value ? kConvertibleToStringView
const T&, absl::string_view>::value
? kConvertibleToStringView
: :
#endif #endif
kOtherType)>::PrintValue(x, &os); kOtherType)>::PrintValue(x, &os);
@ -252,12 +249,13 @@ template <typename Char, typename CharTraits, typename T>
// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up // This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
// magic needed for implementing UniversalPrinter won't work. // magic needed for implementing UniversalPrinter won't work.
namespace testing_internal { namespace testing_internal
{
// Used to print a value that is not an STL-style container when the // Used to print a value that is not an STL-style container when the
// user doesn't define PrintTo() for it. // user doesn't define PrintTo() for it.
template <typename T> template <typename T> void DefaultPrintNonContainerTo(const T &value, ::std::ostream *os)
void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) { {
// With the following statement, during unqualified name lookup, // With the following statement, during unqualified name lookup,
// testing::internal2::operator<< appears as if it was declared in // testing::internal2::operator<< appears as if it was declared in
// the nearest enclosing namespace that contains both // the nearest enclosing namespace that contains both
@ -289,8 +287,10 @@ void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
} // namespace testing_internal } // namespace testing_internal
namespace testing { namespace testing
namespace internal { {
namespace internal
{
// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a // FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
// value of type ToPrint that is an operand of a comparison assertion // value of type ToPrint that is an operand of a comparison assertion
@ -307,20 +307,22 @@ namespace internal {
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
// The default case. // The default case.
template <typename ToPrint, typename OtherOperand> template <typename ToPrint, typename OtherOperand> class FormatForComparison
class FormatForComparison { {
public: public:
static ::std::string Format(const ToPrint& value) { static ::std::string Format(const ToPrint &value)
{
return ::testing::PrintToString(value); return ::testing::PrintToString(value);
} }
}; };
// Array. // Array.
template <typename ToPrint, size_t N, typename OtherOperand> template <typename ToPrint, size_t N, typename OtherOperand> class FormatForComparison<ToPrint[N], OtherOperand>
class FormatForComparison<ToPrint[N], OtherOperand> { {
public: public:
static ::std::string Format(const ToPrint* value) { static ::std::string Format(const ToPrint *value)
return FormatForComparison<const ToPrint*, OtherOperand>::Format(value); {
return FormatForComparison<const ToPrint *, OtherOperand>::Format(value);
} }
}; };
@ -328,11 +330,12 @@ class FormatForComparison<ToPrint[N], OtherOperand> {
// whether they actually point to a NUL-terminated string. // whether they actually point to a NUL-terminated string.
#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \ #define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
template <typename OtherOperand> \ template <typename OtherOperand> class FormatForComparison<CharType *, OtherOperand> \
class FormatForComparison<CharType*, OtherOperand> { \ { \
public: \ public: \
static ::std::string Format(CharType* value) { \ static ::std::string Format(CharType *value) \
return ::testing::PrintToString(static_cast<const void*>(value)); \ { \
return ::testing::PrintToString(static_cast<const void *>(value)); \
} \ } \
} }
@ -347,10 +350,11 @@ GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
// to point to a NUL-terminated string, and thus can print it as a string. // to point to a NUL-terminated string, and thus can print it as a string.
#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \ #define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
template <> \ template <> class FormatForComparison<CharType *, OtherStringType> \
class FormatForComparison<CharType*, OtherStringType> { \ { \
public: \ public: \
static ::std::string Format(CharType* value) { \ static ::std::string Format(CharType *value) \
{ \
return ::testing::PrintToString(value); \ return ::testing::PrintToString(value); \
} \ } \
} }
@ -374,8 +378,8 @@ GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
// //
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
template <typename T1, typename T2> template <typename T1, typename T2>
std::string FormatForComparisonFailureMessage( std::string FormatForComparisonFailureMessage(const T1 &value, const T2 & /* other_operand */)
const T1& value, const T2& /* other_operand */) { {
return FormatForComparison<T1, T2>::Format(value); return FormatForComparison<T1, T2>::Format(value);
} }
@ -386,33 +390,36 @@ std::string FormatForComparisonFailureMessage(
// We define UniversalPrinter as a class template (as opposed to a // We define UniversalPrinter as a class template (as opposed to a
// function template), as we need to partially specialize it for // function template), as we need to partially specialize it for
// reference types, which cannot be done with function templates. // reference types, which cannot be done with function templates.
template <typename T> template <typename T> class UniversalPrinter;
class UniversalPrinter;
template <typename T> template <typename T> void UniversalPrint(const T &value, ::std::ostream *os);
void UniversalPrint(const T& value, ::std::ostream* os);
enum DefaultPrinterType { enum DefaultPrinterType
{
kPrintContainer, kPrintContainer,
kPrintPointer, kPrintPointer,
kPrintFunctionPointer, kPrintFunctionPointer,
kPrintOther, kPrintOther,
}; };
template <DefaultPrinterType type> struct WrapPrinterType {}; template <DefaultPrinterType type> struct WrapPrinterType
{
};
// Used to print an STL-style container when the user doesn't define // Used to print an STL-style container when the user doesn't define
// a PrintTo() for it. // a PrintTo() for it.
template <typename C> template <typename C>
void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */, void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */, const C &container, ::std::ostream *os)
const C& container, ::std::ostream* os) { {
const size_t kMaxCount = 32; // The maximum number of elements to print. const size_t kMaxCount = 32; // The maximum number of elements to print.
*os << '{'; *os << '{';
size_t count = 0; size_t count = 0;
for (typename C::const_iterator it = container.begin(); for (typename C::const_iterator it = container.begin(); it != container.end(); ++it, ++count)
it != container.end(); ++it, ++count) { {
if (count > 0) { if (count > 0)
{
*os << ','; *os << ',';
if (count == kMaxCount) { // Enough has been printed. if (count == kMaxCount)
{ // Enough has been printed.
*os << " ..."; *os << " ...";
break; break;
} }
@ -423,7 +430,8 @@ void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */,
internal::UniversalPrint(*it, os); internal::UniversalPrint(*it, os);
} }
if (count > 0) { if (count > 0)
{
*os << ' '; *os << ' ';
} }
*os << '}'; *os << '}';
@ -435,36 +443,39 @@ void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */,
// a location in the address space. Their representation is // a location in the address space. Their representation is
// implementation-defined. Therefore they will be printed as raw // implementation-defined. Therefore they will be printed as raw
// bytes.) // bytes.)
template <typename T> template <typename T> void DefaultPrintTo(WrapPrinterType<kPrintPointer> /* dummy */, T *p, ::std::ostream *os)
void DefaultPrintTo(WrapPrinterType<kPrintPointer> /* dummy */, {
T* p, ::std::ostream* os) { if (p == nullptr)
if (p == nullptr) { {
*os << "NULL"; *os << "NULL";
} else { }
else
{
// T is not a function type. We just call << to print p, // T is not a function type. We just call << to print p,
// relying on ADL to pick up user-defined << for their pointer // relying on ADL to pick up user-defined << for their pointer
// types, if any. // types, if any.
*os << p; *os << p;
} }
} }
template <typename T> template <typename T> void DefaultPrintTo(WrapPrinterType<kPrintFunctionPointer> /* dummy */, T *p, ::std::ostream *os)
void DefaultPrintTo(WrapPrinterType<kPrintFunctionPointer> /* dummy */, {
T* p, ::std::ostream* os) { if (p == nullptr)
if (p == nullptr) { {
*os << "NULL"; *os << "NULL";
} else { }
else
{
// T is a function type, so '*os << p' doesn't do what we want // T is a function type, so '*os << p' doesn't do what we want
// (it just prints p as bool). We want to print p as a const // (it just prints p as bool). We want to print p as a const
// void*. // void*.
*os << reinterpret_cast<const void*>(p); *os << reinterpret_cast<const void *>(p);
} }
} }
// Used to print a non-container, non-pointer value when the user // Used to print a non-container, non-pointer value when the user
// doesn't define PrintTo() for it. // doesn't define PrintTo() for it.
template <typename T> template <typename T> void DefaultPrintTo(WrapPrinterType<kPrintOther> /* dummy */, const T &value, ::std::ostream *os)
void DefaultPrintTo(WrapPrinterType<kPrintOther> /* dummy */, {
const T& value, ::std::ostream* os) {
::testing_internal::DefaultPrintNonContainerTo(value, os); ::testing_internal::DefaultPrintNonContainerTo(value, os);
} }
@ -479,8 +490,8 @@ void DefaultPrintTo(WrapPrinterType<kPrintOther> /* dummy */,
// Foo is not desirable (e.g. the coding style may prevent doing it, // Foo is not desirable (e.g. the coding style may prevent doing it,
// or there is already a << operator but it doesn't do what the user // or there is already a << operator but it doesn't do what the user
// wants). // wants).
template <typename T> template <typename T> void PrintTo(const T &value, ::std::ostream *os)
void PrintTo(const T& value, ::std::ostream* os) { {
// DefaultPrintTo() is overloaded. The type of its first argument // DefaultPrintTo() is overloaded. The type of its first argument
// determines which version will be picked. // determines which version will be picked.
// //
@ -500,15 +511,11 @@ void PrintTo(const T& value, ::std::ostream* os) {
// cause this warning, and use a separate overload of DefaultPrintTo for // cause this warning, and use a separate overload of DefaultPrintTo for
// function pointers so that the `*os << p` in the object pointer overload // function pointers so that the `*os << p` in the object pointer overload
// doesn't cause that warning either. // doesn't cause that warning either.
DefaultPrintTo( DefaultPrintTo(WrapPrinterType < (sizeof(IsContainerTest<T>(0)) == sizeof(IsContainer)) &&
WrapPrinterType <
(sizeof(IsContainerTest<T>(0)) == sizeof(IsContainer)) &&
!IsRecursiveContainer<T>::value !IsRecursiveContainer<T>::value
? kPrintContainer ? kPrintContainer
: !std::is_pointer<T>::value : !std::is_pointer<T>::value ? kPrintOther
? kPrintOther : std::is_function<typename std::remove_pointer<T>::type>::value ? kPrintFunctionPointer
: std::is_function<typename std::remove_pointer<T>::type>::value
? kPrintFunctionPointer
: kPrintPointer > (), : kPrintPointer > (),
value, os); value, os);
} }
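// --- Illustrative sketch (assumed usage, not from the sources above) ---
// Teaching the universal printer about a user-defined type by providing a
// PrintTo() overload in that type's own namespace, as the comment above
// describes. The namespace, struct, and test names are hypothetical;
// PrintToString() is the public wrapper declared elsewhere in this header,
// and "gtest/gtest.h" plus <ostream> are assumed to be included.
namespace sketch {

struct Point {
  int x;
  int y;
};

// Found via argument-dependent lookup when gtest needs to print a Point.
void PrintTo(const Point& p, ::std::ostream* os) {
  *os << "Point(" << p.x << ", " << p.y << ")";
}

}  // namespace sketch

TEST(PrintToSketch, UsesCustomPrinter) {
  EXPECT_EQ("Point(1, 2)", ::testing::PrintToString(sketch::Point{1, 2}));
}
// --- end of sketch ---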
@ -518,9 +525,10 @@ void PrintTo(const T& value, ::std::ostream* os) {
// types, strings, plain arrays, and pointers). // types, strings, plain arrays, and pointers).
// Overloads for various char types. // Overloads for various char types.
GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os); GTEST_API_ void PrintTo(unsigned char c, ::std::ostream *os);
GTEST_API_ void PrintTo(signed char c, ::std::ostream* os); GTEST_API_ void PrintTo(signed char c, ::std::ostream *os);
inline void PrintTo(char c, ::std::ostream* os) { inline void PrintTo(char c, ::std::ostream *os)
{
// When printing a plain char, we always treat it as unsigned. This // When printing a plain char, we always treat it as unsigned. This
// way, the output won't be affected by whether the compiler thinks // way, the output won't be affected by whether the compiler thinks
// char is signed or not. // char is signed or not.
@ -528,7 +536,8 @@ inline void PrintTo(char c, ::std::ostream* os) {
} }
// Overloads for other simple built-in types. // Overloads for other simple built-in types.
inline void PrintTo(bool x, ::std::ostream* os) { inline void PrintTo(bool x, ::std::ostream *os)
{
*os << (x ? "true" : "false"); *os << (x ? "true" : "false");
} }
@ -539,27 +548,32 @@ inline void PrintTo(bool x, ::std::ostream* os) {
// as signed integer when wchar_t is implemented by the compiler // as signed integer when wchar_t is implemented by the compiler
// as a signed type and is printed as an unsigned integer when wchar_t // as a signed type and is printed as an unsigned integer when wchar_t
// is implemented as an unsigned type. // is implemented as an unsigned type.
GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os); GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream *os);
// Overloads for C strings. // Overloads for C strings.
GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); GTEST_API_ void PrintTo(const char *s, ::std::ostream *os);
inline void PrintTo(char* s, ::std::ostream* os) { inline void PrintTo(char *s, ::std::ostream *os)
PrintTo(ImplicitCast_<const char*>(s), os); {
PrintTo(ImplicitCast_<const char *>(s), os);
} }
// signed/unsigned char is often used for representing binary data, so // signed/unsigned char is often used for representing binary data, so
// we print pointers to it as void* to be safe. // we print pointers to it as void* to be safe.
inline void PrintTo(const signed char* s, ::std::ostream* os) { inline void PrintTo(const signed char *s, ::std::ostream *os)
PrintTo(ImplicitCast_<const void*>(s), os); {
PrintTo(ImplicitCast_<const void *>(s), os);
} }
inline void PrintTo(signed char* s, ::std::ostream* os) { inline void PrintTo(signed char *s, ::std::ostream *os)
PrintTo(ImplicitCast_<const void*>(s), os); {
PrintTo(ImplicitCast_<const void *>(s), os);
} }
inline void PrintTo(const unsigned char* s, ::std::ostream* os) { inline void PrintTo(const unsigned char *s, ::std::ostream *os)
PrintTo(ImplicitCast_<const void*>(s), os); {
PrintTo(ImplicitCast_<const void *>(s), os);
} }
inline void PrintTo(unsigned char* s, ::std::ostream* os) { inline void PrintTo(unsigned char *s, ::std::ostream *os)
PrintTo(ImplicitCast_<const void*>(s), os); {
PrintTo(ImplicitCast_<const void *>(s), os);
} }
// MSVC can be configured to define wchar_t as a typedef of unsigned // MSVC can be configured to define wchar_t as a typedef of unsigned
@ -569,9 +583,10 @@ inline void PrintTo(unsigned char* s, ::std::ostream* os) {
// possibly causing invalid memory accesses. // possibly causing invalid memory accesses.
#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) #if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
// Overloads for wide C strings // Overloads for wide C strings
GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os); GTEST_API_ void PrintTo(const wchar_t *s, ::std::ostream *os);
inline void PrintTo(wchar_t* s, ::std::ostream* os) { inline void PrintTo(wchar_t *s, ::std::ostream *os)
PrintTo(ImplicitCast_<const wchar_t*>(s), os); {
PrintTo(ImplicitCast_<const wchar_t *>(s), os);
} }
#endif #endif
@ -580,72 +595,78 @@ inline void PrintTo(wchar_t* s, ::std::ostream* os) {
// Prints the given number of elements in an array, without printing // Prints the given number of elements in an array, without printing
// the curly braces. // the curly braces.
template <typename T> template <typename T> void PrintRawArrayTo(const T a[], size_t count, ::std::ostream *os)
void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { {
UniversalPrint(a[0], os); UniversalPrint(a[0], os);
for (size_t i = 1; i != count; i++) { for (size_t i = 1; i != count; i++)
{
*os << ", "; *os << ", ";
UniversalPrint(a[i], os); UniversalPrint(a[i], os);
} }
} }
// Overloads for ::std::string. // Overloads for ::std::string.
GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); GTEST_API_ void PrintStringTo(const ::std::string &s, ::std::ostream *os);
inline void PrintTo(const ::std::string& s, ::std::ostream* os) { inline void PrintTo(const ::std::string &s, ::std::ostream *os)
{
PrintStringTo(s, os); PrintStringTo(s, os);
} }
// Overloads for ::std::wstring. // Overloads for ::std::wstring.
#if GTEST_HAS_STD_WSTRING #if GTEST_HAS_STD_WSTRING
GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); GTEST_API_ void PrintWideStringTo(const ::std::wstring &s, ::std::ostream *os);
inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { inline void PrintTo(const ::std::wstring &s, ::std::ostream *os)
{
PrintWideStringTo(s, os); PrintWideStringTo(s, os);
} }
#endif // GTEST_HAS_STD_WSTRING #endif // GTEST_HAS_STD_WSTRING
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
// Overload for absl::string_view. // Overload for absl::string_view.
inline void PrintTo(absl::string_view sp, ::std::ostream* os) { inline void PrintTo(absl::string_view sp, ::std::ostream *os)
{
PrintTo(::std::string(sp), os); PrintTo(::std::string(sp), os);
} }
#endif // GTEST_HAS_ABSL #endif // GTEST_HAS_ABSL
inline void PrintTo(std::nullptr_t, ::std::ostream* os) { *os << "(nullptr)"; } inline void PrintTo(std::nullptr_t, ::std::ostream *os)
{
*os << "(nullptr)";
}
template <typename T> template <typename T> void PrintTo(std::reference_wrapper<T> ref, ::std::ostream *os)
void PrintTo(std::reference_wrapper<T> ref, ::std::ostream* os) { {
UniversalPrinter<T&>::Print(ref.get(), os); UniversalPrinter<T &>::Print(ref.get(), os);
} }
// Helper function for printing a tuple. T must be instantiated with // Helper function for printing a tuple. T must be instantiated with
// a tuple type. // a tuple type.
template <typename T> template <typename T> void PrintTupleTo(const T &, std::integral_constant<size_t, 0>, ::std::ostream *)
void PrintTupleTo(const T&, std::integral_constant<size_t, 0>, {
::std::ostream*) {} }
template <typename T, size_t I> template <typename T, size_t I> void PrintTupleTo(const T &t, std::integral_constant<size_t, I>, ::std::ostream *os)
void PrintTupleTo(const T& t, std::integral_constant<size_t, I>, {
::std::ostream* os) {
PrintTupleTo(t, std::integral_constant<size_t, I - 1>(), os); PrintTupleTo(t, std::integral_constant<size_t, I - 1>(), os);
GTEST_INTENTIONAL_CONST_COND_PUSH_() GTEST_INTENTIONAL_CONST_COND_PUSH_()
if (I > 1) { if (I > 1)
{
GTEST_INTENTIONAL_CONST_COND_POP_() GTEST_INTENTIONAL_CONST_COND_POP_()
*os << ", "; *os << ", ";
} }
UniversalPrinter<typename std::tuple_element<I - 1, T>::type>::Print( UniversalPrinter<typename std::tuple_element<I - 1, T>::type>::Print(std::get<I - 1>(t), os);
std::get<I - 1>(t), os);
} }
template <typename... Types> template <typename... Types> void PrintTo(const ::std::tuple<Types...> &t, ::std::ostream *os)
void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) { {
*os << "("; *os << "(";
PrintTupleTo(t, std::integral_constant<size_t, sizeof...(Types)>(), os); PrintTupleTo(t, std::integral_constant<size_t, sizeof...(Types)>(), os);
*os << ")"; *os << ")";
} }
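// --- Illustrative sketch (assumed usage, not from the sources above) ---
// What the tuple and pair printers above produce when reached through the
// public ::testing::PrintToString() wrapper; element formatting follows the
// per-type printers. Test name is hypothetical; assumes "gtest/gtest.h",
// <tuple>, and <utility> are included.
TEST(TuplePrintSketch, PrintsParenthesizedElements) {
  EXPECT_EQ("(1, 2)", ::testing::PrintToString(std::make_tuple(1, 2)));
  EXPECT_EQ("(3, true)", ::testing::PrintToString(std::make_pair(3, true)));
}
// --- end of sketch ---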
// Overload for std::pair. // Overload for std::pair.
template <typename T1, typename T2> template <typename T1, typename T2> void PrintTo(const ::std::pair<T1, T2> &value, ::std::ostream *os)
void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) { {
*os << '('; *os << '(';
// We cannot use UniversalPrint(value.first, os) here, as T1 may be // We cannot use UniversalPrint(value.first, os) here, as T1 may be
// a reference type. The same for printing value.second. // a reference type. The same for printing value.second.
@ -657,8 +678,8 @@ void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
// Implements printing a non-reference type T by letting the compiler // Implements printing a non-reference type T by letting the compiler
// pick the right overload of PrintTo() for T. // pick the right overload of PrintTo() for T.
template <typename T> template <typename T> class UniversalPrinter
class UniversalPrinter { {
public: public:
// MSVC warns about adding const to a function type, so we want to // MSVC warns about adding const to a function type, so we want to
// disable the warning. // disable the warning.
@ -667,7 +688,8 @@ class UniversalPrinter {
// Note: we deliberately don't call this PrintTo(), as that name // Note: we deliberately don't call this PrintTo(), as that name
// conflicts with ::testing::internal::PrintTo in the body of the // conflicts with ::testing::internal::PrintTo in the body of the
// function. // function.
static void Print(const T& value, ::std::ostream* os) { static void Print(const T &value, ::std::ostream *os)
{
// By default, ::testing::internal::PrintTo() is used for printing // By default, ::testing::internal::PrintTo() is used for printing
// the value. // the value.
// //
@ -686,14 +708,18 @@ class UniversalPrinter {
// Printer for absl::optional // Printer for absl::optional
template <typename T> template <typename T> class UniversalPrinter<::absl::optional<T>>
class UniversalPrinter<::absl::optional<T>> { {
public: public:
static void Print(const ::absl::optional<T>& value, ::std::ostream* os) { static void Print(const ::absl::optional<T> &value, ::std::ostream *os)
{
*os << '('; *os << '(';
if (!value) { if (!value)
{
*os << "nullopt"; *os << "nullopt";
} else { }
else
{
UniversalPrint(*value, os); UniversalPrint(*value, os);
} }
*os << ')'; *os << ')';
@ -702,23 +728,25 @@ class UniversalPrinter<::absl::optional<T>> {
// Printer for absl::variant // Printer for absl::variant
template <typename... T> template <typename... T> class UniversalPrinter<::absl::variant<T...>>
class UniversalPrinter<::absl::variant<T...>> { {
public: public:
static void Print(const ::absl::variant<T...>& value, ::std::ostream* os) { static void Print(const ::absl::variant<T...> &value, ::std::ostream *os)
{
*os << '('; *os << '(';
absl::visit(Visitor{os}, value); absl::visit(Visitor{os}, value);
*os << ')'; *os << ')';
} }
private: private:
struct Visitor { struct Visitor
template <typename U> {
void operator()(const U& u) const { template <typename U> void operator()(const U &u) const
{
*os << "'" << GetTypeName<U>() << "' with value "; *os << "'" << GetTypeName<U>() << "' with value ";
UniversalPrint(u, os); UniversalPrint(u, os);
} }
::std::ostream* os; ::std::ostream *os;
}; };
}; };
@ -726,20 +754,26 @@ class UniversalPrinter<::absl::variant<T...>> {
// UniversalPrintArray(begin, len, os) prints an array of 'len' // UniversalPrintArray(begin, len, os) prints an array of 'len'
// elements, starting at address 'begin'. // elements, starting at address 'begin'.
template <typename T> template <typename T> void UniversalPrintArray(const T *begin, size_t len, ::std::ostream *os)
void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { {
if (len == 0) { if (len == 0)
{
*os << "{}"; *os << "{}";
} else { }
else
{
*os << "{ "; *os << "{ ";
const size_t kThreshold = 18; const size_t kThreshold = 18;
const size_t kChunkSize = 8; const size_t kChunkSize = 8;
// If the array has more than kThreshold elements, we'll have to // If the array has more than kThreshold elements, we'll have to
// omit some details by printing only the first and the last // omit some details by printing only the first and the last
// kChunkSize elements. // kChunkSize elements.
if (len <= kThreshold) { if (len <= kThreshold)
{
PrintRawArrayTo(begin, len, os); PrintRawArrayTo(begin, len, os);
} else { }
else
{
PrintRawArrayTo(begin, kChunkSize, os); PrintRawArrayTo(begin, kChunkSize, os);
*os << ", ..., "; *os << ", ..., ";
PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os); PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
@ -748,36 +782,36 @@ void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
} }
} }
// This overload prints a (const) char array compactly. // This overload prints a (const) char array compactly.
GTEST_API_ void UniversalPrintArray( GTEST_API_ void UniversalPrintArray(const char *begin, size_t len, ::std::ostream *os);
const char* begin, size_t len, ::std::ostream* os);
// This overload prints a (const) wchar_t array compactly. // This overload prints a (const) wchar_t array compactly.
GTEST_API_ void UniversalPrintArray( GTEST_API_ void UniversalPrintArray(const wchar_t *begin, size_t len, ::std::ostream *os);
const wchar_t* begin, size_t len, ::std::ostream* os);
// Implements printing an array type T[N]. // Implements printing an array type T[N].
template <typename T, size_t N> template <typename T, size_t N> class UniversalPrinter<T[N]>
class UniversalPrinter<T[N]> { {
public: public:
// Prints the given array, omitting some elements when there are too // Prints the given array, omitting some elements when there are too
// many. // many.
static void Print(const T (&a)[N], ::std::ostream* os) { static void Print(const T (&a)[N], ::std::ostream *os)
{
UniversalPrintArray(a, N, os); UniversalPrintArray(a, N, os);
} }
}; };
// Implements printing a reference type T&. // Implements printing a reference type T&.
template <typename T> template <typename T> class UniversalPrinter<T &>
class UniversalPrinter<T&> { {
public: public:
// MSVC warns about adding const to a function type, so we want to // MSVC warns about adding const to a function type, so we want to
// disable the warning. // disable the warning.
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180) GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
static void Print(const T& value, ::std::ostream* os) { static void Print(const T &value, ::std::ostream *os)
{
// Prints the address of the value. We use reinterpret_cast here // Prints the address of the value. We use reinterpret_cast here
// as static_cast doesn't compile when T is a function type. // as static_cast doesn't compile when T is a function type.
*os << "@" << reinterpret_cast<const void*>(&value) << " "; *os << "@" << reinterpret_cast<const void *>(&value) << " ";
// Then prints the value itself. // Then prints the value itself.
UniversalPrint(value, os); UniversalPrint(value, os);
@ -790,70 +824,83 @@ class UniversalPrinter<T&> {
// (but not the address) is printed; for a (const) char pointer, the // (but not the address) is printed; for a (const) char pointer, the
// NUL-terminated string (but not the pointer) is printed. // NUL-terminated string (but not the pointer) is printed.
template <typename T> template <typename T> class UniversalTersePrinter
class UniversalTersePrinter { {
public: public:
static void Print(const T& value, ::std::ostream* os) { static void Print(const T &value, ::std::ostream *os)
{
UniversalPrint(value, os); UniversalPrint(value, os);
} }
}; };
template <typename T> template <typename T> class UniversalTersePrinter<T &>
class UniversalTersePrinter<T&> { {
public: public:
static void Print(const T& value, ::std::ostream* os) { static void Print(const T &value, ::std::ostream *os)
{
UniversalPrint(value, os); UniversalPrint(value, os);
} }
}; };
template <typename T, size_t N> template <typename T, size_t N> class UniversalTersePrinter<T[N]>
class UniversalTersePrinter<T[N]> { {
public: public:
static void Print(const T (&value)[N], ::std::ostream* os) { static void Print(const T (&value)[N], ::std::ostream *os)
{
UniversalPrinter<T[N]>::Print(value, os); UniversalPrinter<T[N]>::Print(value, os);
} }
}; };
template <> template <> class UniversalTersePrinter<const char *>
class UniversalTersePrinter<const char*> { {
public: public:
static void Print(const char* str, ::std::ostream* os) { static void Print(const char *str, ::std::ostream *os)
if (str == nullptr) { {
if (str == nullptr)
{
*os << "NULL"; *os << "NULL";
} else { }
else
{
UniversalPrint(std::string(str), os); UniversalPrint(std::string(str), os);
} }
} }
}; };
template <> template <> class UniversalTersePrinter<char *>
class UniversalTersePrinter<char*> { {
public: public:
static void Print(char* str, ::std::ostream* os) { static void Print(char *str, ::std::ostream *os)
UniversalTersePrinter<const char*>::Print(str, os); {
UniversalTersePrinter<const char *>::Print(str, os);
} }
}; };
#if GTEST_HAS_STD_WSTRING #if GTEST_HAS_STD_WSTRING
template <> template <> class UniversalTersePrinter<const wchar_t *>
class UniversalTersePrinter<const wchar_t*> { {
public: public:
static void Print(const wchar_t* str, ::std::ostream* os) { static void Print(const wchar_t *str, ::std::ostream *os)
if (str == nullptr) { {
if (str == nullptr)
{
*os << "NULL"; *os << "NULL";
} else { }
else
{
UniversalPrint(::std::wstring(str), os); UniversalPrint(::std::wstring(str), os);
} }
} }
}; };
#endif #endif
template <> template <> class UniversalTersePrinter<wchar_t *>
class UniversalTersePrinter<wchar_t*> { {
public: public:
static void Print(wchar_t* str, ::std::ostream* os) { static void Print(wchar_t *str, ::std::ostream *os)
UniversalTersePrinter<const wchar_t*>::Print(str, os); {
UniversalTersePrinter<const wchar_t *>::Print(str, os);
} }
}; };
template <typename T> template <typename T> void UniversalTersePrint(const T &value, ::std::ostream *os)
void UniversalTersePrint(const T& value, ::std::ostream* os) { {
UniversalTersePrinter<T>::Print(value, os); UniversalTersePrinter<T>::Print(value, os);
} }
@ -861,27 +908,25 @@ void UniversalTersePrint(const T& value, ::std::ostream* os) {
// difference between this and UniversalTersePrint() is that for a // difference between this and UniversalTersePrint() is that for a
// (const) char pointer, this prints both the pointer and the // (const) char pointer, this prints both the pointer and the
// NUL-terminated string. // NUL-terminated string.
template <typename T> template <typename T> void UniversalPrint(const T &value, ::std::ostream *os)
void UniversalPrint(const T& value, ::std::ostream* os) { {
  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating       // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
// UniversalPrinter with T directly. // UniversalPrinter with T directly.
typedef T T1; typedef T T1;
UniversalPrinter<T1>::Print(value, os); UniversalPrinter<T1>::Print(value, os);
} }
typedef ::std::vector< ::std::string> Strings; typedef ::std::vector<::std::string> Strings;
// Tersely prints the first N fields of a tuple to a string vector, // Tersely prints the first N fields of a tuple to a string vector,
// one element for each field. // one element for each field.
template <typename Tuple> template <typename Tuple> void TersePrintPrefixToStrings(const Tuple &, std::integral_constant<size_t, 0>, Strings *)
void TersePrintPrefixToStrings(const Tuple&, std::integral_constant<size_t, 0>, {
Strings*) {} }
template <typename Tuple, size_t I> template <typename Tuple, size_t I>
void TersePrintPrefixToStrings(const Tuple& t, void TersePrintPrefixToStrings(const Tuple &t, std::integral_constant<size_t, I>, Strings *strings)
std::integral_constant<size_t, I>, {
Strings* strings) { TersePrintPrefixToStrings(t, std::integral_constant<size_t, I - 1>(), strings);
TersePrintPrefixToStrings(t, std::integral_constant<size_t, I - 1>(),
strings);
::std::stringstream ss; ::std::stringstream ss;
UniversalTersePrint(std::get<I - 1>(t), &ss); UniversalTersePrint(std::get<I - 1>(t), &ss);
strings->push_back(ss.str()); strings->push_back(ss.str());
@ -890,29 +935,28 @@ void TersePrintPrefixToStrings(const Tuple& t,
// Prints the fields of a tuple tersely to a string vector, one // Prints the fields of a tuple tersely to a string vector, one
// element for each field. See the comment before // element for each field. See the comment before
// UniversalTersePrint() for how we define "tersely". // UniversalTersePrint() for how we define "tersely".
template <typename Tuple> template <typename Tuple> Strings UniversalTersePrintTupleFieldsToStrings(const Tuple &value)
Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { {
Strings result; Strings result;
TersePrintPrefixToStrings( TersePrintPrefixToStrings(value, std::integral_constant<size_t, std::tuple_size<Tuple>::value>(), &result);
value, std::integral_constant<size_t, std::tuple_size<Tuple>::value>(),
&result);
return result; return result;
} }
} // namespace internal } // namespace internal
#if GTEST_HAS_ABSL #if GTEST_HAS_ABSL
namespace internal2 { namespace internal2
{
template <typename T> template <typename T>
void TypeWithoutFormatter<T, kConvertibleToStringView>::PrintValue( void TypeWithoutFormatter<T, kConvertibleToStringView>::PrintValue(const T &value, ::std::ostream *os)
const T& value, ::std::ostream* os) { {
internal::PrintTo(absl::string_view(value), os); internal::PrintTo(absl::string_view(value), os);
} }
} // namespace internal2 } // namespace internal2
#endif #endif
template <typename T> template <typename T>::std::string PrintToString(const T &value)
::std::string PrintToString(const T& value) { {
::std::stringstream ss; ::std::stringstream ss;
internal::UniversalTersePrinter<T>::Print(value, &ss); internal::UniversalTersePrinter<T>::Print(value, &ss);
return ss.str(); return ss.str();
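Illustrative sketch (not part of this commit): how the printer layer above is typically reached from user code via testing::PrintToString. The container and pair types are arbitrary examples; the exact output layout is an implementation detail of the printers, so the check below only asserts that something was printed.

#include <gtest/gtest.h>
#include <string>
#include <utility>
#include <vector>

TEST(PrinterSketch, PrintToStringFormatsContainers)
{
    std::vector<std::pair<int, std::string>> v = {{1, "one"}, {2, "two"}};
    // PrintToString() dispatches to internal::UniversalTersePrinter<T>::Print().
    std::string s = ::testing::PrintToString(v);
    EXPECT_FALSE(s.empty()); // e.g. "{ (1, \"one\"), (2, \"two\") }", format not guaranteed here
}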
View File
@ -38,10 +38,10 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
/* class A needs to have dll-interface to be used by clients of class B */)
namespace testing { namespace testing
{
// This helper class can be used to mock out Google Test failure reporting // This helper class can be used to mock out Google Test failure reporting
// so that we can test Google Test or code that builds on Google Test. // so that we can test Google Test or code that builds on Google Test.
@ -52,11 +52,12 @@ namespace testing {
// generated in the same thread that created this object or it can intercept // generated in the same thread that created this object or it can intercept
// all generated failures. The scope of this mock object can be controlled with // all generated failures. The scope of this mock object can be controlled with
// the second argument to the two arguments constructor. // the second argument to the two arguments constructor.
class GTEST_API_ ScopedFakeTestPartResultReporter class GTEST_API_ ScopedFakeTestPartResultReporter : public TestPartResultReporterInterface
: public TestPartResultReporterInterface { {
public: public:
// The two possible mocking modes of this object. // The two possible mocking modes of this object.
enum InterceptMode { enum InterceptMode
{
INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
INTERCEPT_ALL_THREADS // Intercepts all failures. INTERCEPT_ALL_THREADS // Intercepts all failures.
}; };
@ -65,11 +66,10 @@ class GTEST_API_ ScopedFakeTestPartResultReporter
// by Google Test. The 'result' parameter specifies where to report the // by Google Test. The 'result' parameter specifies where to report the
// results. This reporter will only catch failures generated in the current // results. This reporter will only catch failures generated in the current
// thread. DEPRECATED // thread. DEPRECATED
explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); explicit ScopedFakeTestPartResultReporter(TestPartResultArray *result);
// Same as above, but you can choose the interception scope of this object. // Same as above, but you can choose the interception scope of this object.
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, TestPartResultArray *result);
TestPartResultArray* result);
// The d'tor restores the previous test part result reporter. // The d'tor restores the previous test part result reporter.
~ScopedFakeTestPartResultReporter() override; ~ScopedFakeTestPartResultReporter() override;
@ -79,33 +79,35 @@ class GTEST_API_ ScopedFakeTestPartResultReporter
// //
// This method is from the TestPartResultReporterInterface // This method is from the TestPartResultReporterInterface
// interface. // interface.
void ReportTestPartResult(const TestPartResult& result) override; void ReportTestPartResult(const TestPartResult &result) override;
private: private:
void Init(); void Init();
const InterceptMode intercept_mode_; const InterceptMode intercept_mode_;
TestPartResultReporterInterface* old_reporter_; TestPartResultReporterInterface *old_reporter_;
TestPartResultArray* const result_; TestPartResultArray *const result_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
}; };
namespace internal { namespace internal
{
// A helper class for implementing EXPECT_FATAL_FAILURE() and // A helper class for implementing EXPECT_FATAL_FAILURE() and
// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given // EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
// TestPartResultArray contains exactly one failure that has the given // TestPartResultArray contains exactly one failure that has the given
// type and contains the given substring. If that's not the case, a // type and contains the given substring. If that's not the case, a
// non-fatal failure will be generated. // non-fatal failure will be generated.
class GTEST_API_ SingleFailureChecker { class GTEST_API_ SingleFailureChecker
{
public: public:
// The constructor remembers the arguments. // The constructor remembers the arguments.
SingleFailureChecker(const TestPartResultArray* results, SingleFailureChecker(const TestPartResultArray *results, TestPartResult::Type type, const std::string &substr);
TestPartResult::Type type, const std::string& substr);
~SingleFailureChecker(); ~SingleFailureChecker();
private: private:
const TestPartResultArray* const results_; const TestPartResultArray *const results_;
const TestPartResult::Type type_; const TestPartResult::Type type_;
const std::string substr_; const std::string substr_;
@ -142,37 +144,45 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// works. The AcceptsMacroThatExpandsToUnprotectedComma test in // works. The AcceptsMacroThatExpandsToUnprotectedComma test in
// gtest_unittest.cc will fail to compile if we do that. // gtest_unittest.cc will fail to compile if we do that.
#define EXPECT_FATAL_FAILURE(statement, substr) \ #define EXPECT_FATAL_FAILURE(statement, substr) \
do { \ do \
class GTestExpectFatalFailureHelper {\ { \
public:\ class GTestExpectFatalFailureHelper \
static void Execute() { statement; }\ { \
};\ public: \
::testing::TestPartResultArray gtest_failures;\ static void Execute() \
::testing::internal::SingleFailureChecker gtest_checker(\ { \
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ statement; \
{\ } \
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ }; \
::testing::ScopedFakeTestPartResultReporter:: \ ::testing::TestPartResultArray gtest_failures; \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\ ::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
GTestExpectFatalFailureHelper::Execute();\ ::testing::TestPartResult::kFatalFailure, (substr)); \
}\ { \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
GTestExpectFatalFailureHelper::Execute(); \
} \
} while (::testing::internal::AlwaysFalse()) } while (::testing::internal::AlwaysFalse())
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ #define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do { \ do \
class GTestExpectFatalFailureHelper {\ { \
public:\ class GTestExpectFatalFailureHelper \
static void Execute() { statement; }\ { \
};\ public: \
::testing::TestPartResultArray gtest_failures;\ static void Execute() \
::testing::internal::SingleFailureChecker gtest_checker(\ { \
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ statement; \
{\ } \
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ }; \
::testing::ScopedFakeTestPartResultReporter:: \ ::testing::TestPartResultArray gtest_failures; \
INTERCEPT_ALL_THREADS, &gtest_failures);\ ::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
GTestExpectFatalFailureHelper::Execute();\ ::testing::TestPartResult::kFatalFailure, (substr)); \
}\ { \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
GTestExpectFatalFailureHelper::Execute(); \
} \
} while (::testing::internal::AlwaysFalse()) } while (::testing::internal::AlwaysFalse())
// A macro for testing Google Test assertions or code that's expected to // A macro for testing Google Test assertions or code that's expected to
@ -208,31 +218,35 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) // GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
// to avoid an MSVC warning on unreachable code. // to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \ #define EXPECT_NONFATAL_FAILURE(statement, substr) \
do {\ do \
::testing::TestPartResultArray gtest_failures;\ { \
::testing::internal::SingleFailureChecker gtest_checker(\ ::testing::TestPartResultArray gtest_failures; \
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \ ::testing::internal::SingleFailureChecker gtest_checker( \
(substr));\ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
{\ { \
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter:: \ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\ if (::testing::internal::AlwaysTrue()) \
if (::testing::internal::AlwaysTrue()) { statement; }\ { \
}\ statement; \
} \
} \
} while (::testing::internal::AlwaysFalse()) } while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ #define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do {\ do \
::testing::TestPartResultArray gtest_failures;\ { \
::testing::internal::SingleFailureChecker gtest_checker(\ ::testing::TestPartResultArray gtest_failures; \
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \ ::testing::internal::SingleFailureChecker gtest_checker( \
(substr));\ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
{\ { \
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ ::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
&gtest_failures);\ if (::testing::internal::AlwaysTrue()) \
if (::testing::internal::AlwaysTrue()) { statement; }\ { \
}\ statement; \
} \
} \
} while (::testing::internal::AlwaysFalse()) } while (::testing::internal::AlwaysFalse())
#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_ #endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
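Illustrative sketch (not part of this commit): a minimal use of the SPI macro reformatted above. EXPECT_NONFATAL_FAILURE requires the statement to produce exactly one non-fatal failure whose message contains the given substring.

#include <gtest/gtest-spi.h>
#include <gtest/gtest.h>

TEST(SpiSketch, InterceptsAnExpectedNonFatalFailure)
{
    // ADD_FAILURE() emits a single non-fatal failure, which the macro intercepts.
    EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "expected failure", "expected failure");
}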
View File
@ -32,25 +32,27 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ #ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ #define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#include <iosfwd>
#include <vector>
#include "gtest/internal/gtest-internal.h" #include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-string.h" #include "gtest/internal/gtest-string.h"
#include <iosfwd>
#include <vector>
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
/* class A needs to have dll-interface to be used by clients of class B */)
namespace testing { namespace testing
{
// A copyable object representing the result of a test part (i.e. an // A copyable object representing the result of a test part (i.e. an
// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()). // assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()).
// //
// Don't inherit from TestPartResult as its destructor is not virtual. // Don't inherit from TestPartResult as its destructor is not virtual.
class GTEST_API_ TestPartResult { class GTEST_API_ TestPartResult
{
public: public:
// The possible outcomes of a test part (i.e. an assertion or an // The possible outcomes of a test part (i.e. an assertion or an
// explicit SUCCEED(), FAIL(), or ADD_FAILURE()). // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
enum Type { enum Type
{
kSuccess, // Succeeded. kSuccess, // Succeeded.
kNonFatalFailure, // Failed but the test can continue. kNonFatalFailure, // Failed but the test can continue.
kFatalFailure, // Failed and the test should be terminated. kFatalFailure, // Failed and the test should be terminated.
@ -60,54 +62,80 @@ class GTEST_API_ TestPartResult {
// C'tor. TestPartResult does NOT have a default constructor. // C'tor. TestPartResult does NOT have a default constructor.
// Always use this constructor (with parameters) to create a // Always use this constructor (with parameters) to create a
// TestPartResult object. // TestPartResult object.
TestPartResult(Type a_type, const char* a_file_name, int a_line_number, TestPartResult(Type a_type, const char *a_file_name, int a_line_number, const char *a_message)
const char* a_message) : type_(a_type), file_name_(a_file_name == nullptr ? "" : a_file_name), line_number_(a_line_number),
: type_(a_type), summary_(ExtractSummary(a_message)), message_(a_message)
file_name_(a_file_name == nullptr ? "" : a_file_name), {
line_number_(a_line_number), }
summary_(ExtractSummary(a_message)),
message_(a_message) {}
// Gets the outcome of the test part. // Gets the outcome of the test part.
Type type() const { return type_; } Type type() const
{
return type_;
}
// Gets the name of the source file where the test part took place, or // Gets the name of the source file where the test part took place, or
// NULL if it's unknown. // NULL if it's unknown.
const char* file_name() const { const char *file_name() const
{
return file_name_.empty() ? nullptr : file_name_.c_str(); return file_name_.empty() ? nullptr : file_name_.c_str();
} }
// Gets the line in the source file where the test part took place, // Gets the line in the source file where the test part took place,
// or -1 if it's unknown. // or -1 if it's unknown.
int line_number() const { return line_number_; } int line_number() const
{
return line_number_;
}
// Gets the summary of the failure message. // Gets the summary of the failure message.
const char* summary() const { return summary_.c_str(); } const char *summary() const
{
return summary_.c_str();
}
// Gets the message associated with the test part. // Gets the message associated with the test part.
const char* message() const { return message_.c_str(); } const char *message() const
{
return message_.c_str();
}
// Returns true if and only if the test part was skipped. // Returns true if and only if the test part was skipped.
bool skipped() const { return type_ == kSkip; } bool skipped() const
{
return type_ == kSkip;
}
// Returns true if and only if the test part passed. // Returns true if and only if the test part passed.
bool passed() const { return type_ == kSuccess; } bool passed() const
{
return type_ == kSuccess;
}
// Returns true if and only if the test part non-fatally failed. // Returns true if and only if the test part non-fatally failed.
bool nonfatally_failed() const { return type_ == kNonFatalFailure; } bool nonfatally_failed() const
{
return type_ == kNonFatalFailure;
}
// Returns true if and only if the test part fatally failed. // Returns true if and only if the test part fatally failed.
bool fatally_failed() const { return type_ == kFatalFailure; } bool fatally_failed() const
{
return type_ == kFatalFailure;
}
// Returns true if and only if the test part failed. // Returns true if and only if the test part failed.
bool failed() const { return fatally_failed() || nonfatally_failed(); } bool failed() const
{
return fatally_failed() || nonfatally_failed();
}
private: private:
Type type_; Type type_;
// Gets the summary of the failure message by omitting the stack // Gets the summary of the failure message by omitting the stack
// trace in it. // trace in it.
static std::string ExtractSummary(const char* message); static std::string ExtractSummary(const char *message);
// The name of the source file where the test part took place, or // The name of the source file where the test part took place, or
// "" if the source file is unknown. // "" if the source file is unknown.
@ -120,21 +148,24 @@ class GTEST_API_ TestPartResult {
}; };
// Prints a TestPartResult object. // Prints a TestPartResult object.
std::ostream& operator<<(std::ostream& os, const TestPartResult& result); std::ostream &operator<<(std::ostream &os, const TestPartResult &result);
// An array of TestPartResult objects. // An array of TestPartResult objects.
// //
// Don't inherit from TestPartResultArray as its destructor is not // Don't inherit from TestPartResultArray as its destructor is not
// virtual. // virtual.
class GTEST_API_ TestPartResultArray { class GTEST_API_ TestPartResultArray
{
public: public:
TestPartResultArray() {} TestPartResultArray()
{
}
// Appends the given TestPartResult to the array. // Appends the given TestPartResult to the array.
void Append(const TestPartResult& result); void Append(const TestPartResult &result);
// Returns the TestPartResult at the given index (0-based). // Returns the TestPartResult at the given index (0-based).
const TestPartResult& GetTestPartResult(int index) const; const TestPartResult &GetTestPartResult(int index) const;
// Returns the number of TestPartResult objects in the array. // Returns the number of TestPartResult objects in the array.
int size() const; int size() const;
@ -146,14 +177,18 @@ class GTEST_API_ TestPartResultArray {
}; };
// This interface knows how to report a test part result. // This interface knows how to report a test part result.
class GTEST_API_ TestPartResultReporterInterface { class GTEST_API_ TestPartResultReporterInterface
{
public: public:
virtual ~TestPartResultReporterInterface() {} virtual ~TestPartResultReporterInterface()
{
}
virtual void ReportTestPartResult(const TestPartResult& result) = 0; virtual void ReportTestPartResult(const TestPartResult &result) = 0;
}; };
namespace internal { namespace internal
{
// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a // This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
// statement generates new fatal failures. To do so it registers itself as the // statement generates new fatal failures. To do so it registers itself as the
@ -161,16 +196,20 @@ namespace internal {
// reported, it only delegates the reporting to the former result reporter. // reported, it only delegates the reporting to the former result reporter.
// The original result reporter is restored in the destructor. // The original result reporter is restored in the destructor.
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
class GTEST_API_ HasNewFatalFailureHelper class GTEST_API_ HasNewFatalFailureHelper : public TestPartResultReporterInterface
: public TestPartResultReporterInterface { {
public: public:
HasNewFatalFailureHelper(); HasNewFatalFailureHelper();
~HasNewFatalFailureHelper() override; ~HasNewFatalFailureHelper() override;
void ReportTestPartResult(const TestPartResult& result) override; void ReportTestPartResult(const TestPartResult &result) override;
bool has_new_fatal_failure() const { return has_new_fatal_failure_; } bool has_new_fatal_failure() const
{
return has_new_fatal_failure_;
}
private: private:
bool has_new_fatal_failure_; bool has_new_fatal_failure_;
TestPartResultReporterInterface* original_reporter_; TestPartResultReporterInterface *original_reporter_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
}; };
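Illustrative sketch (not part of this commit): exercising the TestPartResult accessors above by capturing a failure with the fake reporter from gtest-spi.h. The test and suite names are placeholders.

#include <gtest/gtest-spi.h>
#include <gtest/gtest.h>

TEST(TestPartSketch, CapturedResultExposesItsFields)
{
    ::testing::TestPartResultArray failures;
    {
        ::testing::ScopedFakeTestPartResultReporter reporter(
            ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &failures);
        ADD_FAILURE() << "captured"; // recorded into `failures` instead of failing this test
    }
    ASSERT_EQ(1, failures.size());
    const ::testing::TestPartResult &r = failures.GetTestPartResult(0);
    EXPECT_TRUE(r.nonfatally_failed());
    EXPECT_NE(-1, r.line_number()); // ADD_FAILURE records its source location
}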
View File
@ -27,7 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// GOOGLETEST_CM0001 DO NOT DELETE // GOOGLETEST_CM0001 DO NOT DELETE
#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ #ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
@ -185,41 +184,29 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// Expands to the name of the typedef for the NameGenerator, responsible for // Expands to the name of the typedef for the NameGenerator, responsible for
// creating the suffixes of the name. // creating the suffixes of the name.
#define GTEST_NAME_GENERATOR_(TestSuiteName) \ #define GTEST_NAME_GENERATOR_(TestSuiteName) gtest_type_params_##TestSuiteName##_NameGenerator
gtest_type_params_##TestSuiteName##_NameGenerator
#define TYPED_TEST_SUITE(CaseName, Types, ...) \ #define TYPED_TEST_SUITE(CaseName, Types, ...) \
typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_( \ typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_(CaseName); \
CaseName); \ typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type GTEST_NAME_GENERATOR_(CaseName)
typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type \
GTEST_NAME_GENERATOR_(CaseName)
# define TYPED_TEST(CaseName, TestName) \ #define TYPED_TEST(CaseName, TestName) \
template <typename gtest_TypeParam_> \ template <typename gtest_TypeParam_> \
class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \ class GTEST_TEST_CLASS_NAME_(CaseName, TestName) : public CaseName<gtest_TypeParam_> \
: public CaseName<gtest_TypeParam_> { \ { \
private: \ private: \
typedef CaseName<gtest_TypeParam_> TestFixture; \ typedef CaseName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \ typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \ virtual void TestBody(); \
}; \ }; \
static bool gtest_##CaseName##_##TestName##_registered_ \ static bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTest< \ ::testing::internal::TypeParameterizedTest< \
CaseName, \ CaseName, ::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, \ GTEST_TYPE_PARAMS_(CaseName)>:: \
TestName)>, \ Register( \
GTEST_TYPE_PARAMS_( \ "", ::testing::internal::CodeLocation(__FILE__, __LINE__), #CaseName, #TestName, 0, \
CaseName)>::Register("", \ ::testing::internal::GenerateNames<GTEST_NAME_GENERATOR_(CaseName), GTEST_TYPE_PARAMS_(CaseName)>()); \
::testing::internal::CodeLocation( \ template <typename gtest_TypeParam_> void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
__FILE__, __LINE__), \
#CaseName, #TestName, 0, \
::testing::internal::GenerateNames< \
GTEST_NAME_GENERATOR_(CaseName), \
GTEST_TYPE_PARAMS_(CaseName)>()); \
template <typename gtest_TypeParam_> \
void GTEST_TEST_CLASS_NAME_(CaseName, \
TestName)<gtest_TypeParam_>::TestBody()
// Legacy API is deprecated but still available // Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
@ -245,22 +232,19 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// //
// Expands to the name of the variable used to remember the names of // Expands to the name of the variable used to remember the names of
// the defined tests in the given test suite. // the defined tests in the given test suite.
#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) \ #define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) gtest_typed_test_suite_p_state_##TestSuiteName##_
gtest_typed_test_suite_p_state_##TestSuiteName##_
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY. // INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
// //
// Expands to the name of the variable used to remember the names of // Expands to the name of the variable used to remember the names of
// the registered tests in the given test suite. // the registered tests in the given test suite.
#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) \ #define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) gtest_registered_test_names_##TestSuiteName##_
gtest_registered_test_names_##TestSuiteName##_
// The variables defined in the type-parameterized test macros are // The variables defined in the type-parameterized test macros are
// static as typically these macros are used in a .h file that can be // static as typically these macros are used in a .h file that can be
// #included in multiple translation units linked together. // #included in multiple translation units linked together.
#define TYPED_TEST_SUITE_P(SuiteName) \ #define TYPED_TEST_SUITE_P(SuiteName) \
static ::testing::internal::TypedTestSuitePState \ static ::testing::internal::TypedTestSuitePState GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
// Legacy API is deprecated but still available // Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
@ -270,58 +254,48 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_P(SuiteName, TestName) \ #define TYPED_TEST_P(SuiteName, TestName) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \ namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
template <typename gtest_TypeParam_> \ { \
class TestName : public SuiteName<gtest_TypeParam_> { \ template <typename gtest_TypeParam_> class TestName : public SuiteName<gtest_TypeParam_> \
{ \
private: \ private: \
typedef SuiteName<gtest_TypeParam_> TestFixture; \ typedef SuiteName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \ typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \ virtual void TestBody(); \
}; \ }; \
static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \ static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName( \ GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName(__FILE__, __LINE__, #SuiteName, #TestName); \
__FILE__, __LINE__, #SuiteName, #TestName); \
} \ } \
template <typename gtest_TypeParam_> \ template <typename gtest_TypeParam_> void GTEST_SUITE_NAMESPACE_(SuiteName)::TestName<gtest_TypeParam_>::TestBody()
void GTEST_SUITE_NAMESPACE_( \
SuiteName)::TestName<gtest_TypeParam_>::TestBody()
#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \ #define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \ namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
{ \
typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \ typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
} \ } \
static const char* const GTEST_REGISTERED_TEST_NAMES_( \ static const char *const GTEST_REGISTERED_TEST_NAMES_(SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \ GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames(__FILE__, __LINE__, #__VA_ARGS__)
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames( \
__FILE__, __LINE__, #__VA_ARGS__)
// Legacy API is deprecated but still available // Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define REGISTER_TYPED_TEST_CASE_P \ #define REGISTER_TYPED_TEST_CASE_P \
static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), \ static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), ""); \
""); \
REGISTER_TYPED_TEST_SUITE_P REGISTER_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \ #define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \
static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \ static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTestSuite< \ ::testing::internal::TypeParameterizedTestSuite<SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
::testing::internal::TypeList<Types>::type>:: \ ::testing::internal::TypeList<Types>::type>:: \
Register(#Prefix, \ Register(#Prefix, ::testing::internal::CodeLocation(__FILE__, __LINE__), \
::testing::internal::CodeLocation(__FILE__, __LINE__), \ &GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
&GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, \ ::testing::internal::GenerateNames<::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type, \
GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
::testing::internal::GenerateNames< \
::testing::internal::NameGeneratorSelector< \
__VA_ARGS__>::type, \
::testing::internal::TypeList<Types>::type>()) ::testing::internal::TypeList<Types>::type>())
// Legacy API is deprecated but still available // Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_CASE_P \ #define INSTANTIATE_TYPED_TEST_CASE_P \
static_assert( \ static_assert(::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
INSTANTIATE_TYPED_TEST_SUITE_P INSTANTIATE_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
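Illustrative sketch (not part of this commit): a small typed-test suite using the macros condensed above. WidthTest, TestedTypes, and the test name are made-up placeholders.

#include <gtest/gtest.h>

template <typename T> class WidthTest : public ::testing::Test
{
};

using TestedTypes = ::testing::Types<char, int, long>;
TYPED_TEST_SUITE(WidthTest, TestedTypes);

TYPED_TEST(WidthTest, OccupiesAtLeastOneByte)
{
    // TypeParam is the current element of TestedTypes.
    EXPECT_GE(sizeof(TypeParam), 1u);
}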
File diff suppressed because it is too large
View File
@ -38,7 +38,8 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
namespace testing { namespace testing
{
// This header implements a family of generic predicate assertion // This header implements a family of generic predicate assertion
// macros: // macros:
@ -79,65 +80,43 @@ namespace testing {
else \ else \
on_failure(gtest_ar.failure_message()) on_failure(gtest_ar.failure_message())
// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use // Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
// this in your code. // this in your code.
template <typename Pred, template <typename Pred, typename T1>
typename T1> AssertionResult AssertPred1Helper(const char *pred_text, const char *e1, Pred pred, const T1 &v1)
AssertionResult AssertPred1Helper(const char* pred_text, {
const char* e1, if (pred(v1))
Pred pred, return AssertionSuccess();
const T1& v1) {
if (pred(v1)) return AssertionSuccess();
return AssertionFailure() return AssertionFailure() << pred_text << "(" << e1 << ") evaluates to false, where"
<< pred_text << "(" << e1 << ") evaluates to false, where"
<< "\n" << "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1); << e1 << " evaluates to " << ::testing::PrintToString(v1);
} }
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1. // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
// Don't use this in your code. // Don't use this in your code.
#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\ #define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure) GTEST_ASSERT_(pred_format(#v1, v1), on_failure)
GTEST_ASSERT_(pred_format(#v1, v1), \
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use // Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
// this in your code. // this in your code.
#define GTEST_PRED1_(pred, v1, on_failure)\ #define GTEST_PRED1_(pred, v1, on_failure) GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, #v1, pred, v1), on_failure)
GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
#v1, \
pred, \
v1), on_failure)
// Unary predicate assertion macros. // Unary predicate assertion macros.
#define EXPECT_PRED_FORMAT1(pred_format, v1) \ #define EXPECT_PRED_FORMAT1(pred_format, v1) GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_) #define EXPECT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED1(pred, v1) \ #define ASSERT_PRED_FORMAT1(pred_format, v1) GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_) #define ASSERT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED_FORMAT1(pred_format, v1) \
GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED1(pred, v1) \
GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use // Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
// this in your code. // this in your code.
template <typename Pred, template <typename Pred, typename T1, typename T2>
typename T1, AssertionResult AssertPred2Helper(const char *pred_text, const char *e1, const char *e2, Pred pred, const T1 &v1,
typename T2> const T2 &v2)
AssertionResult AssertPred2Helper(const char* pred_text, {
const char* e1, if (pred(v1, v2))
const char* e2, return AssertionSuccess();
Pred pred,
const T1& v1,
const T2& v2) {
if (pred(v1, v2)) return AssertionSuccess();
return AssertionFailure() return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ") evaluates to false, where"
<< pred_text << "(" << e1 << ", " << e2
<< ") evaluates to false, where"
<< "\n" << "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n" << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2); << e2 << " evaluates to " << ::testing::PrintToString(v2);
@ -145,51 +124,29 @@ AssertionResult AssertPred2Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2. // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
// Don't use this in your code. // Don't use this in your code.
#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\ #define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure) GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), on_failure)
GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use // Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
// this in your code. // this in your code.
#define GTEST_PRED2_(pred, v1, v2, on_failure)\ #define GTEST_PRED2_(pred, v1, v2, on_failure) \
GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \ GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, #v1, #v2, pred, v1, v2), on_failure)
#v1, \
#v2, \
pred, \
v1, \
v2), on_failure)
// Binary predicate assertion macros. // Binary predicate assertion macros.
#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \ #define EXPECT_PRED_FORMAT2(pred_format, v1, v2) GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_) #define EXPECT_PRED2(pred, v1, v2) GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED2(pred, v1, v2) \ #define ASSERT_PRED_FORMAT2(pred_format, v1, v2) GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_) #define ASSERT_PRED2(pred, v1, v2) GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED2(pred, v1, v2) \
GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use // Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
// this in your code. // this in your code.
template <typename Pred, template <typename Pred, typename T1, typename T2, typename T3>
typename T1, AssertionResult AssertPred3Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, Pred pred,
typename T2, const T1 &v1, const T2 &v2, const T3 &v3)
typename T3> {
AssertionResult AssertPred3Helper(const char* pred_text, if (pred(v1, v2, v3))
const char* e1, return AssertionSuccess();
const char* e2,
const char* e3,
Pred pred,
const T1& v1,
const T2& v2,
const T3& v3) {
if (pred(v1, v2, v3)) return AssertionSuccess();
return AssertionFailure() return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ") evaluates to false, where"
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3
<< ") evaluates to false, where"
<< "\n" << "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n" << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n" << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
@ -198,55 +155,31 @@ AssertionResult AssertPred3Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3. // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
// Don't use this in your code. // Don't use this in your code.
#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\ #define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), on_failure)
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use // Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
// this in your code. // this in your code.
#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\ #define GTEST_PRED3_(pred, v1, v2, v3, on_failure) \
GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \ GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, #v1, #v2, #v3, pred, v1, v2, v3), on_failure)
#v1, \
#v2, \
#v3, \
pred, \
v1, \
v2, \
v3), on_failure)
// Ternary predicate assertion macros. // Ternary predicate assertion macros.
#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \ #define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_) GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED3(pred, v1, v2, v3) \ #define EXPECT_PRED3(pred, v1, v2, v3) GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_) #define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \ #define ASSERT_PRED3(pred, v1, v2, v3) GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED3(pred, v1, v2, v3) \
GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use // Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
// this in your code. // this in your code.
template <typename Pred, template <typename Pred, typename T1, typename T2, typename T3, typename T4>
typename T1, AssertionResult AssertPred4Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, const char *e4,
typename T2, Pred pred, const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4)
typename T3, {
typename T4> if (pred(v1, v2, v3, v4))
AssertionResult AssertPred4Helper(const char* pred_text, return AssertionSuccess();
const char* e1,
const char* e2,
const char* e3,
const char* e4,
Pred pred,
const T1& v1,
const T2& v2,
const T3& v3,
const T4& v4) {
if (pred(v1, v2, v3, v4)) return AssertionSuccess();
return AssertionFailure() return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
<< ") evaluates to false, where" << ") evaluates to false, where"
<< "\n" << "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n" << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
@ -257,61 +190,34 @@ AssertionResult AssertPred4Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4. // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
// Don't use this in your code. // Don't use this in your code.
#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\ #define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), on_failure)
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use // Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
// this in your code. // this in your code.
#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\ #define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure) \
GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \ GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, #v1, #v2, #v3, #v4, pred, v1, v2, v3, v4), on_failure)
#v1, \
#v2, \
#v3, \
#v4, \
pred, \
v1, \
v2, \
v3, \
v4), on_failure)
// 4-ary predicate assertion macros. // 4-ary predicate assertion macros.
#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ #define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_) GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED4(pred, v1, v2, v3, v4) \ #define EXPECT_PRED4(pred, v1, v2, v3, v4) GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ #define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_) GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED4(pred, v1, v2, v3, v4) \ #define ASSERT_PRED4(pred, v1, v2, v3, v4) GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use // Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
// this in your code. // this in your code.
template <typename Pred, template <typename Pred, typename T1, typename T2, typename T3, typename T4, typename T5>
typename T1, AssertionResult AssertPred5Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, const char *e4,
typename T2, const char *e5, Pred pred, const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4,
typename T3, const T5 &v5)
typename T4, {
typename T5> if (pred(v1, v2, v3, v4, v5))
AssertionResult AssertPred5Helper(const char* pred_text, return AssertionSuccess();
const char* e1,
const char* e2,
const char* e3,
const char* e4,
const char* e5,
Pred pred,
const T1& v1,
const T2& v2,
const T3& v3,
const T4& v4,
const T5& v5) {
if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
return AssertionFailure() return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4 << ", " << e5
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4 << ") evaluates to false, where"
<< ", " << e5 << ") evaluates to false, where"
<< "\n" << "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n" << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n" << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
@ -322,37 +228,21 @@ AssertionResult AssertPred5Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5. // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
// Don't use this in your code. // Don't use this in your code.
#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\ #define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), on_failure)
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use // Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
// this in your code. // this in your code.
#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ #define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \
GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, pred, v1, v2, v3, v4, v5), on_failure)
#v1, \
#v2, \
#v3, \
#v4, \
#v5, \
pred, \
v1, \
v2, \
v3, \
v4, \
v5), on_failure)
// 5-ary predicate assertion macros. // 5-ary predicate assertion macros.
#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ #define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ #define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ #define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ #define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
} // namespace testing } // namespace testing
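Illustrative sketch (not part of this commit): using the n-ary predicate assertions whose helpers are reformatted above. MutuallyPrime is an invented helper, not part of Google Test; on failure, AssertPred2Helper prints both argument values.

#include <gtest/gtest.h>

static bool MutuallyPrime(int a, int b)
{
    for (int d = 2; d <= a && d <= b; ++d)
    {
        if (a % d == 0 && b % d == 0)
            return false;
    }
    return true;
}

TEST(PredSketch, BinaryPredicateAssertion)
{
    EXPECT_PRED2(MutuallyPrime, 3, 5); // passes; a failure would report the stringified call and both values
}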
View File
@ -55,7 +55,6 @@
// Note: The test class must be in the same namespace as the class being tested. // Note: The test class must be in the same namespace as the class being tested.
// For example, putting MyClassTest in an anonymous namespace will not work. // For example, putting MyClassTest in an anonymous namespace will not work.
#define FRIEND_TEST(test_case_name, test_name)\ #define FRIEND_TEST(test_case_name, test_name) friend class test_case_name##_##test_name##_Test
friend class test_case_name##_##test_name##_Test
#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_ #endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
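Illustrative sketch (not part of this commit): typical placement of the FRIEND_TEST macro above. Counter and the test name are hypothetical; the declaration lets the named test access private members.

#include <gtest/gtest_prod.h>

class Counter
{
  public:
    void Increment()
    {
        ++count_;
    }

  private:
    int count_ = 0;
    // Grants CounterTest.StartsAtZero access to count_.
    FRIEND_TEST(CounterTest, StartsAtZero);
};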
View File
@ -39,11 +39,13 @@
#include "gtest/gtest-matchers.h"
#include "gtest/internal/gtest-internal.h"

#include <memory>
#include <stdio.h>

namespace testing
{
namespace internal
{

GTEST_DECLARE_string_(internal_run_death_test);

@ -54,8 +56,7 @@ const char kInternalRunDeathTestFlag[] = "internal_run_death_test";

#if GTEST_HAS_DEATH_TEST

GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)

// DeathTest is a class that hides much of the complexity of the
// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
@ -70,7 +71,8 @@ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
//             by wait(2)
// exit code:  The integer code passed to exit(3), _exit(2), or
//             returned from main()
class GTEST_API_ DeathTest
{
  public:
    // Create returns false if there was an error determining the
    // appropriate action to take for the current death test; for example,
@ -80,18 +82,27 @@ class GTEST_API_ DeathTest {
    // argument is set.  If the death test should be skipped, the pointer
    // is set to NULL; otherwise, it is set to the address of a new concrete
    // DeathTest object that controls the execution of the current test.
    static bool Create(const char *statement, Matcher<const std::string &> matcher, const char *file, int line,
                       DeathTest **test);
    DeathTest();
    virtual ~DeathTest()
    {
    }

    // A helper class that aborts a death test when it's deleted.
    class ReturnSentinel
    {
      public:
        explicit ReturnSentinel(DeathTest *test) : test_(test)
        {
        }
        ~ReturnSentinel()
        {
            test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT);
        }

      private:
        DeathTest *const test_;
        GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
    } GTEST_ATTRIBUTE_UNUSED_;

@ -100,10 +111,15 @@ class GTEST_API_ DeathTest {
    // be executed immediately.  OVERSEE means that the program should prepare
    // the appropriate environment for a child process to execute the death
    // test, then wait for it to complete.
    enum TestRole
    {
        OVERSEE_TEST,
        EXECUTE_TEST
    };

    // An enumeration of the three reasons that a test might be aborted.
    enum AbortReason
    {
        TEST_ENCOUNTERED_RETURN_STATEMENT,
        TEST_THREW_EXCEPTION,
        TEST_DID_NOT_DIE
@ -129,9 +145,9 @@ class GTEST_API_ DeathTest {

    // Returns a human-readable outcome message regarding the outcome of
    // the last death test.
    static const char *LastMessage();

    static void set_last_death_test_message(const std::string &message);

  private:
    // A string containing a description of the outcome of the last death test.
@ -143,19 +159,22 @@ class GTEST_API_ DeathTest {
GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251

// Factory interface for death tests.  May be mocked out for testing.
class DeathTestFactory
{
  public:
    virtual ~DeathTestFactory()
    {
    }
    virtual bool Create(const char *statement, Matcher<const std::string &> matcher, const char *file, int line,
                        DeathTest **test) = 0;
};

// A concrete DeathTestFactory implementation for normal use.
class DefaultDeathTestFactory : public DeathTestFactory
{
  public:
    bool Create(const char *statement, Matcher<const std::string &> matcher, const char *file, int line,
                DeathTest **test) override;
};

// Returns true if exit_status describes a process that was terminated
@ -165,73 +184,80 @@ GTEST_API_ bool ExitedUnsuccessfully(int exit_status);

// A string passed to EXPECT_DEATH (etc.) is caught by one of these overloads
// and interpreted as a regex (rather than an Eq matcher) for legacy
// compatibility.
inline Matcher<const ::std::string &> MakeDeathTestMatcher(::testing::internal::RE regex)
{
    return ContainsRegex(regex.pattern());
}
inline Matcher<const ::std::string &> MakeDeathTestMatcher(const char *regex)
{
    return ContainsRegex(regex);
}
inline Matcher<const ::std::string &> MakeDeathTestMatcher(const ::std::string &regex)
{
    return ContainsRegex(regex);
}

// If a Matcher<const ::std::string&> is passed to EXPECT_DEATH (etc.), it's
// used directly.
inline Matcher<const ::std::string &> MakeDeathTestMatcher(Matcher<const ::std::string &> matcher)
{
    return matcher;
}

// Traps C++ exceptions escaping statement and reports them as test
// failures. Note that trapping SEH exceptions is not implemented here.
#if GTEST_HAS_EXCEPTIONS
#define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
    try \
    { \
        GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
    catch (const ::std::exception &gtest_exception) \
    { \
        fprintf(stderr, \
                "\n%s: Caught std::exception-derived exception escaping the " \
                "death test statement. Exception message: %s\n", \
                ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), gtest_exception.what()); \
        fflush(stderr); \
        death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
    } \
    catch (...) \
    { \
        death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
    }

#else
#define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
#endif

// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
// ASSERT_EXIT*, and EXPECT_EXIT*.
#define GTEST_DEATH_TEST_(statement, predicate, regex_or_matcher, fail) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (::testing::internal::AlwaysTrue()) \
    { \
        ::testing::internal::DeathTest *gtest_dt; \
        if (!::testing::internal::DeathTest::Create(#statement, \
                                                    ::testing::internal::MakeDeathTestMatcher(regex_or_matcher), \
                                                    __FILE__, __LINE__, &gtest_dt)) \
        { \
            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
        } \
        if (gtest_dt != nullptr) \
        { \
            std::unique_ptr<::testing::internal::DeathTest> gtest_dt_ptr(gtest_dt); \
            switch (gtest_dt->AssumeRole()) \
            { \
            case ::testing::internal::DeathTest::OVERSEE_TEST: \
                if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) \
                { \
                    goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
                } \
                break; \
            case ::testing::internal::DeathTest::EXECUTE_TEST: { \
                ::testing::internal::DeathTest::ReturnSentinel gtest_sentinel(gtest_dt); \
                GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
                gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
                break; \
@ -240,9 +266,9 @@ inline Matcher<const ::std::string&> MakeDeathTestMatcher(
                break; \
            } \
        } \
    } \
    else \
        GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__) : fail(::testing::internal::DeathTest::LastMessage())

// The symbol "fail" here expands to something into which a message
// can be streamed.
@ -253,34 +279,50 @@ inline Matcher<const ::std::string&> MakeDeathTestMatcher(
// warnings and to avoid an expression that doesn't compile in debug mode.
#define GTEST_EXECUTE_STATEMENT_(statement, regex_or_matcher) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (::testing::internal::AlwaysTrue()) \
    { \
        GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
    } \
    else if (!::testing::internal::AlwaysTrue()) \
    { \
        ::testing::internal::MakeDeathTestMatcher(regex_or_matcher); \
    } \
    else \
        ::testing::Message()

// A class representing the parsed contents of the
// --gtest_internal_run_death_test flag, as it existed when
// RUN_ALL_TESTS was called.
class InternalRunDeathTestFlag
{
  public:
    InternalRunDeathTestFlag(const std::string &a_file, int a_line, int an_index, int a_write_fd)
        : file_(a_file), line_(a_line), index_(an_index), write_fd_(a_write_fd)
    {
    }

    ~InternalRunDeathTestFlag()
    {
        if (write_fd_ >= 0)
            posix::Close(write_fd_);
    }

    const std::string &file() const
    {
        return file_;
    }
    int line() const
    {
        return line_;
    }
    int index() const
    {
        return index_;
    }
    int write_fd() const
    {
        return write_fd_;
    }

  private:
    std::string file_;
@ -294,7 +336,7 @@ class InternalRunDeathTestFlag {
// Returns a newly created InternalRunDeathTestFlag object with fields
// initialized from the GTEST_FLAG(internal_run_death_test) flag if
// the flag is specified; otherwise returns NULL.
InternalRunDeathTestFlag *ParseInternalRunDeathTestFlag();

#endif // GTEST_HAS_DEATH_TEST
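For context (not part of the diff): the plumbing above is what the public EXPECT_DEATH/ASSERT_DEATH macros expand to. A minimal usage sketch follows, assuming the target platform supports death tests; the helper function and message are invented for illustration.

#include "gtest/gtest.h"
#include <cstdio>
#include <cstdlib>

static void CrashOnNegative(int n)
{
    if (n < 0)
    {
        std::fprintf(stderr, "negative input\n");
        std::abort();
    }
}

TEST(DeathTestDemo, AbortsOnNegative)
{
    // The second argument is a regex (or a Matcher<const std::string&>)
    // matched against the child process's stderr output.
    EXPECT_DEATH(CrashOnNegative(-1), "negative input");
}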
View File
@ -42,11 +42,12 @@
#include "gtest/internal/gtest-string.h"

GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)

namespace testing
{
namespace internal
{

// FilePath - a class for file and directory pathname manipulation which
// handles platform-specific conventions (like the pathname separator).
@ -59,26 +60,40 @@ namespace internal {
// Names are NOT checked for syntax correctness -- no checking for illegal
// characters, malformed paths, etc.

class GTEST_API_ FilePath
{
  public:
    FilePath() : pathname_("")
    {
    }
    FilePath(const FilePath &rhs) : pathname_(rhs.pathname_)
    {
    }

    explicit FilePath(const std::string &pathname) : pathname_(pathname)
    {
        Normalize();
    }

    FilePath &operator=(const FilePath &rhs)
    {
        Set(rhs);
        return *this;
    }

    void Set(const FilePath &rhs)
    {
        pathname_ = rhs.pathname_;
    }

    const std::string &string() const
    {
        return pathname_;
    }
    const char *c_str() const
    {
        return pathname_.c_str();
    }

    // Returns the current working directory, or "" if unsuccessful.
    static FilePath GetCurrentDir();

@ -87,16 +102,13 @@ class GTEST_API_ FilePath {
    // extension = "xml", returns "dir/test.xml".  If number is greater
    // than zero (e.g., 12), returns "dir/test_12.xml".
    // On Windows platform, uses \ as the separator rather than /.
    static FilePath MakeFileName(const FilePath &directory, const FilePath &base_name, int number,
                                 const char *extension);

    // Given directory = "dir", relative_path = "test.xml",
    // returns "dir/test.xml".
    // On Windows, uses \ as the separator rather than /.
    static FilePath ConcatPaths(const FilePath &directory, const FilePath &relative_path);

    // Returns a pathname for a file that does not currently exist. The pathname
    // will be directory/base_name.extension or
@ -106,12 +118,13 @@ class GTEST_API_ FilePath {
    // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
    // There could be a race condition if two or more processes are calling this
    // function at the same time -- they could both pick the same filename.
    static FilePath GenerateUniqueFileName(const FilePath &directory, const FilePath &base_name, const char *extension);

    // Returns true if and only if the path is "".
    bool IsEmpty() const
    {
        return pathname_.empty();
    }

    // If input name has a trailing separator character, removes it and returns
    // the name, otherwise return the name string unmodified.
@ -138,7 +151,7 @@ class GTEST_API_ FilePath {
    // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
    // FilePath("dir/file"). If a case-insensitive extension is not
    // found, returns a copy of the original FilePath.
    FilePath RemoveExtension(const char *extension) const;

    // Creates directories so that path exists. Returns true if successful or if
    // the directories already exist; returns false if unable to create
@ -198,7 +211,7 @@ class GTEST_API_ FilePath {
    // Returns a pointer to the last occurence of a valid path separator in
    // the FilePath. On Windows, for example, both '/' and '\' are valid path
    // separators. Returns NULL if no path separator was found.
    const char *FindLastPathSeparator() const;

    std::string pathname_;
}; // class FilePath
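A small sketch (not part of the diff) of how the declarations above are exercised. FilePath lives in testing::internal, so this is internal API; the directory and file names below are placeholders.

#include "gtest/internal/gtest-filepath.h"
#include <cstdio>

int main()
{
    using ::testing::internal::FilePath;
    const FilePath dir("reports");
    const FilePath base("results");
    // With number == 3 this yields "reports/results_3.xml" (backslash-separated on Windows).
    const FilePath xml = FilePath::MakeFileName(dir, base, 3, "xml");
    std::printf("%s (empty: %d)\n", xml.c_str(), xml.IsEmpty());
    return 0;
}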
View File
@ -37,69 +37,69 @@
// Determines the platform on which Google Test is compiled.
#ifdef __CYGWIN__
#define GTEST_OS_CYGWIN 1
#elif defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)
#define GTEST_OS_WINDOWS_MINGW 1
#define GTEST_OS_WINDOWS 1
#elif defined _WIN32
#define GTEST_OS_WINDOWS 1
#ifdef _WIN32_WCE
#define GTEST_OS_WINDOWS_MOBILE 1
#elif defined(WINAPI_FAMILY)
#include <winapifamily.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
#define GTEST_OS_WINDOWS_DESKTOP 1
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)
#define GTEST_OS_WINDOWS_PHONE 1
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
#define GTEST_OS_WINDOWS_RT 1
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_TV_TITLE)
#define GTEST_OS_WINDOWS_PHONE 1
#define GTEST_OS_WINDOWS_TV_TITLE 1
#else
// WINAPI_FAMILY defined but no known partition matched.
// Default to desktop.
#define GTEST_OS_WINDOWS_DESKTOP 1
#endif
#else
#define GTEST_OS_WINDOWS_DESKTOP 1
#endif // _WIN32_WCE
#elif defined __OS2__
#define GTEST_OS_OS2 1
#elif defined __APPLE__
#define GTEST_OS_MAC 1
#if TARGET_OS_IPHONE
#define GTEST_OS_IOS 1
#endif
#elif defined __DragonFly__
#define GTEST_OS_DRAGONFLY 1
#elif defined __FreeBSD__
#define GTEST_OS_FREEBSD 1
#elif defined __Fuchsia__
#define GTEST_OS_FUCHSIA 1
#elif defined(__GLIBC__) && defined(__FreeBSD_kernel__)
#define GTEST_OS_GNU_KFREEBSD 1
#elif defined __linux__
#define GTEST_OS_LINUX 1
#if defined __ANDROID__
#define GTEST_OS_LINUX_ANDROID 1
#endif
#elif defined __MVS__
#define GTEST_OS_ZOS 1
#elif defined(__sun) && defined(__SVR4)
#define GTEST_OS_SOLARIS 1
#elif defined(_AIX)
#define GTEST_OS_AIX 1
#elif defined(__hpux)
#define GTEST_OS_HPUX 1
#elif defined __native_client__
#define GTEST_OS_NACL 1
#elif defined __NetBSD__
#define GTEST_OS_NETBSD 1
#elif defined __OpenBSD__
#define GTEST_OS_OPENBSD 1
#elif defined __QNX__
#define GTEST_OS_QNX 1
#elif defined(__HAIKU__)
#define GTEST_OS_HAIKU 1
#endif // __CYGWIN__
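For reference (not part of the diff), downstream code typically branches on these platform macros as in the sketch below; the printed strings are arbitrary.

#include "gtest/internal/gtest-port.h"
#include <cstdio>

int main()
{
#if GTEST_OS_WINDOWS
    std::printf("built for a Windows target\n");
#elif GTEST_OS_MAC
    std::printf("built for an Apple target\n");
#elif GTEST_OS_LINUX
    std::printf("built for a Linux target\n");
#else
    std::printf("built for another platform\n");
#endif
    return 0;
}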
View File
@ -43,7 +43,7 @@

#ifdef __BORLANDC__
// string.h is not guaranteed to provide strcpy on C++ Builder.
#include <mem.h>
#endif

#include <string.h>
@ -51,11 +51,14 @@
#include "gtest/internal/gtest-port.h"

namespace testing
{
namespace internal
{

// String - an abstract class holding static string utilities.
class GTEST_API_ String
{
  public:
    // Static utility methods
@ -66,7 +69,7 @@ class GTEST_API_ String {
    //
    // This is different from strdup() in string.h, which allocates
    // memory using malloc().
    static const char *CloneCString(const char *c_str);

#if GTEST_OS_WINDOWS_MOBILE
    // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
@ -81,7 +84,7 @@ class GTEST_API_ String {
    // The wide string is created using the ANSI codepage (CP_ACP) to
    // match the behaviour of the ANSI versions of Win32 calls and the
    // C runtime.
    static LPCWSTR AnsiToUtf16(const char *c_str);

    // Creates an ANSI string from the given wide string, allocating
    // memory using new. The caller is responsible for deleting the return
@ -91,7 +94,7 @@ class GTEST_API_ String {
    // The returned string is created using the ANSI codepage (CP_ACP) to
    // match the behaviour of the ANSI versions of Win32 calls and the
    // C runtime.
    static const char *Utf16ToAnsi(LPCWSTR utf16_str);
#endif

    // Compares two C strings.  Returns true if and only if they have the same
@ -100,13 +103,13 @@ class GTEST_API_ String {
    // Unlike strcmp(), this function can handle NULL argument(s).  A
    // NULL C string is considered different to any non-NULL C string,
    // including the empty string.
    static bool CStringEquals(const char *lhs, const char *rhs);

    // Converts a wide C string to a String using the UTF-8 encoding.
    // NULL will be converted to "(null)".  If an error occurred during
    // the conversion, "(failed to convert from wide string)" is
    // returned.
    static std::string ShowWideCString(const wchar_t *wide_c_str);

    // Compares two wide C strings.  Returns true if and only if they have the
    // same content.
@ -114,7 +117,7 @@ class GTEST_API_ String {
    // Unlike wcscmp(), this function can handle NULL argument(s).  A
    // NULL C string is considered different to any non-NULL C string,
    // including the empty string.
    static bool WideCStringEquals(const wchar_t *lhs, const wchar_t *rhs);

    // Compares two C strings, ignoring case.  Returns true if and only if
    // they have the same content.
@ -122,8 +125,7 @@ class GTEST_API_ String {
    // Unlike strcasecmp(), this function can handle NULL argument(s).
    // A NULL C string is considered different to any non-NULL C string,
    // including the empty string.
    static bool CaseInsensitiveCStringEquals(const char *lhs, const char *rhs);

    // Compares two wide C strings, ignoring case.  Returns true if and only if
    // they have the same content.
@ -137,13 +139,11 @@ class GTEST_API_ String {
    // which compares according to LC_CTYPE category of the current locale.
    // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
    // current locale.
    static bool CaseInsensitiveWideCStringEquals(const wchar_t *lhs, const wchar_t *rhs);

    // Returns true if and only if the given string ends with the given suffix,
    // ignoring case. Any string is considered to end with an empty suffix.
    static bool EndsWithCaseInsensitive(const std::string &str, const std::string &suffix);

    // Formats an int value as "%02d".
    static std::string FormatIntWidth2(int value); // "%02d" for width == 2
@ -163,7 +163,7 @@ class GTEST_API_ String {

// Gets the content of the stringstream's buffer as an std::string.  Each '\0'
// character in the buffer is replaced with "\\0".
GTEST_API_ std::string StringStreamToString(::std::stringstream *stream);

} // namespace internal
} // namespace testing
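A short sketch (not part of the diff) of the String helpers declared above; again this is internal API, and the literals are arbitrary.

#include "gtest/internal/gtest-string.h"
#include <cstdio>

int main()
{
    using ::testing::internal::String;
    // Unlike strcmp(), CStringEquals tolerates NULL on either side.
    std::printf("%d\n", String::CStringEquals("abc", "abc"));                     // 1
    std::printf("%d\n", String::CStringEquals(nullptr, "abc"));                   // 0
    std::printf("%d\n", String::EndsWithCaseInsensitive("foo_test.XML", ".xml")); // 1
    return 0;
}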
View File
@ -27,8 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This provides interface PrimeTable that determines whether a number is a
// prime and determines a next prime number. This interface is used
// in Google Test samples demonstrating use of parameterized tests.
@ -39,9 +37,12 @@
#include <algorithm>

// The prime table interface.
class PrimeTable
{
  public:
    virtual ~PrimeTable()
    {
    }

    // Returns true if and only if n is a prime number.
    virtual bool IsPrime(int n) const = 0;
@ -52,22 +53,30 @@ class PrimeTable {
};

// Implementation #1 calculates the primes on-the-fly.
class OnTheFlyPrimeTable : public PrimeTable
{
  public:
    bool IsPrime(int n) const override
    {
        if (n <= 1)
            return false;

        for (int i = 2; i * i <= n; i++)
        {
            // n is divisible by an integer other than 1 and itself.
            if ((n % i) == 0)
                return false;
        }

        return true;
    }

    int GetNextPrime(int p) const override
    {
        for (int n = p + 1; n > 0; n++)
        {
            if (IsPrime(n))
                return n;
        }

        return -1;
@ -76,51 +85,63 @@ class OnTheFlyPrimeTable : public PrimeTable {

// Implementation #2 pre-calculates the primes and stores the result
// in an array.
class PreCalculatedPrimeTable : public PrimeTable
{
  public:
    // 'max' specifies the maximum number the prime table holds.
    explicit PreCalculatedPrimeTable(int max) : is_prime_size_(max + 1), is_prime_(new bool[max + 1])
    {
        CalculatePrimesUpTo(max);
    }
    ~PreCalculatedPrimeTable() override
    {
        delete[] is_prime_;
    }

    bool IsPrime(int n) const override
    {
        return 0 <= n && n < is_prime_size_ && is_prime_[n];
    }

    int GetNextPrime(int p) const override
    {
        for (int n = p + 1; n < is_prime_size_; n++)
        {
            if (is_prime_[n])
                return n;
        }

        return -1;
    }

  private:
    void CalculatePrimesUpTo(int max)
    {
        ::std::fill(is_prime_, is_prime_ + is_prime_size_, true);
        is_prime_[0] = is_prime_[1] = false;

        // Checks every candidate for prime number (we know that 2 is the only even
        // prime).
        for (int i = 2; i * i <= max; i += i % 2 + 1)
        {
            if (!is_prime_[i])
                continue;

            // Marks all multiples of i (except i itself) as non-prime.
            // We are starting here from i-th multiplier, because all smaller
            // complex numbers were already marked.
            for (int j = i * i; j <= max; j += i)
            {
                is_prime_[j] = false;
            }
        }
    }

    const int is_prime_size_;
    bool *const is_prime_;

    // Disables compiler warning "assignment operator could not be generated."
    void operator=(const PreCalculatedPrimeTable &rhs);
};

#endif // GTEST_SAMPLES_PRIME_TABLES_H_
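A quick usage sketch (not part of the diff), assuming the header above is available as prime_tables.h on the include path:

#include "prime_tables.h"
#include <cstdio>

int main()
{
    OnTheFlyPrimeTable on_the_fly;
    PreCalculatedPrimeTable pre_calculated(1000);

    // Both implementations agree within the pre-calculated range.
    std::printf("%d %d\n", on_the_fly.IsPrime(7), pre_calculated.IsPrime(7));           // 1 1
    std::printf("%d %d\n", on_the_fly.GetNextPrime(7), pre_calculated.GetNextPrime(7)); // 11 11
    return 0;
}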
View File
@ -32,9 +32,11 @@
#include "sample1.h"

// Returns n! (the factorial of n).  For negative n, n! is defined to be 1.
int Factorial(int n)
{
    int result = 1;
    for (int i = 1; i <= n; i++)
    {
        result *= i;
    }

@ -42,23 +44,29 @@ int Factorial(int n) {
}

// Returns true if and only if n is a prime number.
bool IsPrime(int n)
{
    // Trivial case 1: small numbers
    if (n <= 1)
        return false;

    // Trivial case 2: even numbers
    if (n % 2 == 0)
        return n == 2;

    // Now, we have that n is odd and n >= 3.

    // Try to divide n by every odd number i, starting from 3
    for (int i = 3;; i += 2)
    {
        // We only have to try i up to the square root of n
        if (i > n / i)
            break;

        // Now, we have i <= n/i < n.
        // If n is divisible by i, n is not prime.
        if (n % i == 0)
            return false;
    }

    // n has no integer factor in the range (1, n), and thus is prime.
Some files were not shown because too many files have changed in this diff.