Merge Gitee PR#21 from upd/1.5.0
commit 5289915bcb
Author: ShikiSuen (committed by Gitee)
Date: 2022-04-11 16:12:23 +00:00
507 changed files with 81528 additions and 64975 deletions


@ -21,7 +21,7 @@
"AmbiguousTrailingClosureOverload" : true,
"BeginDocumentationCommentWithOneLineSummary" : false,
"DoNotUseSemicolons" : true,
"DontRepeatTypeInStaticProperties" : true,
"DontRepeatTypeInStaticProperties" : false,
"FileScopedDeclarationPrivacy" : true,
"FullyIndirectEnum" : true,
"GroupNumericLiterals" : true,
@ -39,7 +39,7 @@
"NoVoidReturnOnFunctionSignature" : true,
"OneCasePerLine" : true,
"OneVariableDeclarationPerLine" : true,
"OnlyOneTrailingClosureArgument" : true,
"OnlyOneTrailingClosureArgument" : false,
"OrderedImports" : true,
"ReturnVoidInsteadOfEmptyTuple" : true,
"UseEarlyExits" : false,
@ -47,7 +47,7 @@
"UseShorthandTypeNames" : true,
"UseSingleLinePropertyGetter" : true,
"UseSynthesizedInitializer" : true,
"UseTripleSlashForDocumentationComments" : true,
"UseTripleSlashForDocumentationComments" : false,
"UseWhereClausesInForLoops" : false,
"ValidateDocumentationComments" : false
},
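Of the rules toggled above, DontRepeatTypeInStaticProperties, OnlyOneTrailingClosureArgument, and UseTripleSlashForDocumentationComments are stock apple/swift-format rules that this change switches off. As a minimal sketch of what disabling the first one permits (the NSColor extension is illustrative, not from this repo):

import AppKit

// With "DontRepeatTypeInStaticProperties" : false, swift-format no longer
// flags static members whose names restate the enclosing type:
extension NSColor {
  static let borderColor = NSColor.systemGray  // "Color" repeats "NSColor"
}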

BuildVersionSpecifier.swift (new executable file, 85 lines added)

@ -0,0 +1,85 @@
#!/usr/bin/env swift
// Copyright (c) 2021 and onwards The vChewing Project (MIT-NTL License).
/*
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
1. The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
2. No trademark license is granted to use the trade names, trademarks, service
marks, or product names of Contributor, except as required to fulfill notice
requirements above.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
import Cocoa
extension String {
fileprivate mutating func regReplace(pattern: String, replaceWith: String = "") {
// Ref: https://stackoverflow.com/a/40993403/4162914 && https://stackoverflow.com/a/71291137/4162914
do {
let regex = try NSRegularExpression(
pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines])
let range = NSRange(self.startIndex..., in: self)
self = regex.stringByReplacingMatches(
in: self, options: [], range: range, withTemplate: replaceWith)
} catch { return }
}
}
var verMarket: String = "1.0.0"
var verBuild: String = "1000"
var strXcodeProjContent: String = ""
var dirXcodeProjectFile = "./vChewing.xcodeproj/project.pbxproj"
var dirPackageProjectFile = "./vChewing.pkgproj"
var dirUpdateInfoPlist = "./Update-Info.plist"
var theDictionary: NSDictionary?
if CommandLine.arguments.count == 3 {
verMarket = CommandLine.arguments[1]
verBuild = CommandLine.arguments[2]
// Xcode project file version update.
do {
strXcodeProjContent += try String(contentsOfFile: dirXcodeProjectFile, encoding: .utf8)
} catch {
NSLog(" - Exception happened when reading raw phrases data.")
}
strXcodeProjContent.regReplace(
pattern: #"CURRENT_PROJECT_VERSION = .*$"#, replaceWith: "CURRENT_PROJECT_VERSION = " + verBuild + ";")
strXcodeProjContent.regReplace(
pattern: #"MARKETING_VERSION = .*$"#, replaceWith: "MARKETING_VERSION = " + verMarket + ";")
do {
try strXcodeProjContent.write(to: URL(fileURLWithPath: dirXcodeProjectFile), atomically: false, encoding: .utf8)
} catch {
NSLog(" -: Error on writing strings to file: \(error)")
}
NSLog(" - Xcode 專案版本資訊更新完成:\(verMarket) \(verBuild)")
// Packages project file version update.
theDictionary = NSDictionary(contentsOfFile: dirPackageProjectFile)
theDictionary?.setValue(verMarket, forKeyPath: "PACKAGES.PACKAGE_SETTINGS.VERSION")
theDictionary?.write(toFile: dirPackageProjectFile, atomically: true)
NSLog(" - Packages 專案版本資訊更新完成:\(verMarket) \(verBuild)")
// Update notification project file version update.
theDictionary = NSDictionary(contentsOfFile: dirUpdateInfoPlist)
theDictionary?.setValue(verBuild, forKeyPath: "CFBundleVersion")
theDictionary?.setValue(verMarket, forKeyPath: "CFBundleShortVersionString")
theDictionary?.write(toFile: dirUpdateInfoPlist, atomically: true)
NSLog(" - 更新用通知 plist 版本資訊更新完成:\(verMarket) \(verBuild)")
}
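For reference, the script runs only when given exactly two arguments, e.g. ./BuildVersionSpecifier.swift 1.5.0 1500 (the version values here are illustrative). A minimal, self-contained sketch of the NSRegularExpression substitution it applies to the pbxproj content:

import Foundation

// Hypothetical sample line; the pattern and options mirror regReplace above.
var line = "MARKETING_VERSION = 1.4.9;"
if let regex = try? NSRegularExpression(
  pattern: #"MARKETING_VERSION = .*$"#, options: [.caseInsensitive, .anchorsMatchLines])
{
  let range = NSRange(line.startIndex..., in: line)
  line = regex.stringByReplacingMatches(
    in: line, options: [], range: range, withTemplate: "MARKETING_VERSION = 1.5.0;")
}
print(line)  // MARKETING_VERSION = 1.5.0;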


@ -40,11 +40,6 @@ extension String {
}
}
private func getDocumentsDirectory() -> URL {
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
return paths[0]
}
// MARK: -
// Ref: https://stackoverflow.com/a/32581409/4162914
extension Float {


@ -78,7 +78,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
return
}
self.installingVersion = installingVersion
self.archiveUtil = ArchiveUtil(appName: kTargetBin, targetAppBundleName: kTargetBundle)
archiveUtil = ArchiveUtil(appName: kTargetBin, targetAppBundleName: kTargetBundle)
_ = archiveUtil?.validateIfNotarizedArchiveExists()
cancelButton.nextKeyView = installButton
@ -153,7 +153,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
atPath: (kTargetPartialPath as NSString).expandingTildeInPath)
== false
{
self.installInputMethod(
installInputMethod(
previousExists: false, previousVersionNotFullyDeactivatedWarning: false)
return
}
@ -208,7 +208,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
timeInterval: kTranslocationRemovalTickInterval, target: self,
selector: #selector(timerTick(_:)), userInfo: nil, repeats: true)
} else {
self.installInputMethod(
installInputMethod(
previousExists: false, previousVersionNotFullyDeactivatedWarning: false)
}
}
@ -321,7 +321,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
ntfPostInstall.addButton(withTitle: NSLocalizedString("OK", comment: ""))
}
}
ntfPostInstall.beginSheetModal(for: window!) { response in
ntfPostInstall.beginSheetModal(for: window!) { _ in
self.endAppWithDelay()
}
}
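The installer hunks above are style-only: redundant explicit self. is dropped, and an unused closure parameter becomes _. A minimal illustration (Controller is a made-up type):

final class Controller {
  var count = 0
  func bump() { count += 1 }  // plain member access; no `self.` required
  func run(onDone: @escaping (Int) -> Void) { onDone(count) }
}

let c = Controller()
c.run { _ in print("done") }  // `_ in` marks the closure argument as unused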


@ -83,7 +83,7 @@ struct ArchiveUtil {
}
func unzipNotarizedArchive() -> String? {
if !self.validateIfNotarizedArchiveExists() {
if !validateIfNotarizedArchiveExists() {
return nil
}
guard let notarizedArchive = notarizedArchive,


@ -17,3 +17,5 @@
"Warning" = "Warning";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources.";
"Continue" = "Continue";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -17,3 +17,5 @@
"Warning" = "お知らせ";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "入力アプリの自動起動はうまく出来なかったかもしれません。ご自分で「システム環境設定→キーボード→入力ソース」で起動してください。";
"Continue" = "続行";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -17,3 +17,5 @@
"Warning" = "安装不完整";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "输入法已经安装好,但可能没有完全启用。请从「系统偏好设定」 > 「键盘」 > 「输入方式」分页加入输入法。";
"Continue" = "继续";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -17,3 +17,5 @@
"Warning" = "安裝不完整";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "輸入法已經安裝好,但可能沒有完全啟用。請從「系統偏好設定」 > 「鍵盤」 > 「輸入方式」分頁加入輸入法。";
"Continue" = "繼續";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -1,7 +1,7 @@
#!/bin/sh
TARGET='vChewing'
login_user=`/usr/bin/stat -f%Su /dev/console`
login_user=$(/usr/bin/stat -f%Su /dev/console)
# First, copy the wrongfully installed contents to the right location:
cp -r /Library/Input\ Methods/"${TARGET}".app /Users/"${login_user}"/Library/Input\ Methods/ || true
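(The change above swaps legacy backticks for $(...) command substitution; behavior is identical, but the POSIX-preferred form nests cleanly and avoids backslash-escaping pitfalls.)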


@ -20,21 +20,19 @@ debug:
DSTROOT = /Library/Input Methods
VC_APP_ROOT = $(DSTROOT)/vChewing.app
.PHONY: clang-format lint
.PHONY: clang-format lint batchfix format
format: batchfix clang-format lint
clang-format:
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./DataCompiler/
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./Installer/
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./Source/
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./UserPhraseEditor/
@find ./Installer/ -iname '*.h' -o -iname '*.m' | xargs clang-format -i -style=Microsoft
@find ./Source/3rdParty/OVMandarin -iname '*.h' -o -iname '*.cpp' -o -iname '*.mm' -o -iname '*.m' | xargs clang-format -i -style=Microsoft
@find ./Source/Modules/ -iname '*.h' -o -iname '*.cpp' -o -iname '*.mm' -o -iname '*.m' | xargs clang-format -i -style=Microsoft
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swift-format format --in-place --configuration ./.clang-format-swift.json --parallel
@git ls-files --exclude-standard | grep -E '\.(cpp|hpp|c|cc|cxx|hxx|ixx|h|m|mm|hh)$$' | xargs clang-format -i -style=Microsoft
lint:
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./DataCompiler/
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./Installer/
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./Source/
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./UserPhraseEditor/
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swift-format lint --configuration ./.clang-format-swift.json --parallel
batchfix:
	@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swiftlint --fix --autocorrect
.PHONY: permission-check install-debug install-release

File diff suppressed because it is too large.


@ -4,17 +4,20 @@
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace {
namespace
{
namespace py = ::pybind11;
std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
std::vector<std::string> Initialize(const std::vector<std::string> &argv)
{
// The `argv` pointers here become invalid when this function returns, but
// benchmark holds the pointer to `argv[0]`. We create a static copy of it
// so it persists, and replace the pointer below.
static std::string executable_name(argv[0]);
std::vector<char *> ptrs;
ptrs.reserve(argv.size());
for (auto& arg : argv) {
for (auto &arg : argv)
{
ptrs.push_back(const_cast<char *>(arg.c_str()));
}
ptrs[0] = const_cast<char *>(executable_name.c_str());
@ -22,23 +25,23 @@ std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
benchmark::Initialize(&argc, ptrs.data());
std::vector<std::string> remaining_argv;
remaining_argv.reserve(argc);
for (int i = 0; i < argc; ++i) {
for (int i = 0; i < argc; ++i)
{
remaining_argv.emplace_back(ptrs[i]);
}
return remaining_argv;
}
void RegisterBenchmark(const char* name, py::function f) {
benchmark::RegisterBenchmark(name, [f](benchmark::State& state) {
f(&state);
});
void RegisterBenchmark(const char *name, py::function f)
{
benchmark::RegisterBenchmark(name, [f](benchmark::State &state) { f(&state); });
}
PYBIND11_MODULE(_benchmark, m) {
PYBIND11_MODULE(_benchmark, m)
{
m.def("Initialize", Initialize);
m.def("RegisterBenchmark", RegisterBenchmark);
m.def("RunSpecifiedBenchmarks",
[]() { benchmark::RunSpecifiedBenchmarks(); });
m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); });
py::class_<benchmark::State>(m, "State")
.def("__bool__", &benchmark::State::KeepRunning)


@ -1,12 +1,13 @@
#include <gnuregex.h>
#include <string>
int main() {
int main()
{
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
if (ec != 0)
{
return ec;
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}


@ -1,14 +1,15 @@
#include <regex.h>
#include <string>
int main() {
int main()
{
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
if (ec != 0)
{
return ec;
}
int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
regfree(&re);
return ret;
}


@ -1,10 +1,9 @@
#include <regex>
#include <string>
int main() {
int main()
{
const std::string str = "test0159";
std::regex re;
re = std::regex("^[a-z]+[0-9]+$",
std::regex_constants::extended | std::regex_constants::nosubs);
re = std::regex("^[a-z]+[0-9]+$", std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
}


@ -1,6 +1,7 @@
#include <chrono>
int main() {
int main()
{
typedef std::chrono::steady_clock Clock;
Clock::time_point tp = Clock::now();
((void)tp);


@ -1,4 +1,6 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"
int main() {}
int main()
{
}


@ -1,13 +1,15 @@
#include "benchmark/benchmark.h"
void BM_StringCreation(benchmark::State& state) {
void BM_StringCreation(benchmark::State &state)
{
while (state.KeepRunning())
std::string empty_string;
}
BENCHMARK(BM_StringCreation);
void BM_StringCopy(benchmark::State& state) {
void BM_StringCopy(benchmark::State &state)
{
std::string x = "hello";
while (state.KeepRunning())
std::string copy(x);


@ -3,8 +3,10 @@
#include "internal_macros.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
@ -14,15 +16,13 @@ namespace internal {
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
template <typename T, size_t N> char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
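(Context for the hunk above: ArraySizeHelper is declared but never defined, since only its type is used. It returns a reference to a char array of N elements, so sizeof on a call yields the element count at compile time; unlike the classic sizeof(a)/sizeof(a[0]) macro, it refuses to compile when handed a pointer.)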


@ -106,31 +106,25 @@ DEFINE_bool(benchmark_counters_tabular, false);
// The level of verbose logging to output
DEFINE_int32(v, 0);
namespace benchmark {
namespace benchmark
{
namespace internal {
namespace internal
{
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
void UseCharPointer(char const volatile *)
{
}
} // namespace internal
State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
int thread_i, int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager)
: total_iterations_(0),
batch_leftover_(0),
max_iterations(max_iters),
started_(false),
finished_(false),
error_occurred_(false),
range_(ranges),
complexity_n_(0),
counters(),
thread_index(thread_i),
threads(n_threads),
timer_(timer),
manager_(manager) {
State::State(IterationCount max_iters, const std::vector<int64_t> &ranges, int thread_i, int n_threads,
internal::ThreadTimer *timer, internal::ThreadManager *manager)
: total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), started_(false), finished_(false),
error_occurred_(false), range_(ranges), complexity_n_(0), counters(), thread_index(thread_i), threads(n_threads),
timer_(timer), manager_(manager)
{
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
@ -149,9 +143,7 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <=
(cache_line_size - sizeof(error_occurred_)),
"");
static_assert(offsetof(State, error_occurred_) <= (cache_line_size - sizeof(error_occurred_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
@ -159,51 +151,62 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
#endif
}
void State::PauseTiming() {
void State::PauseTiming()
{
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
}
void State::ResumeTiming() {
void State::ResumeTiming()
{
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
}
void State::SkipWithError(const char* msg) {
void State::SkipWithError(const char *msg)
{
CHECK(msg);
error_occurred_ = true;
{
MutexLock l(manager_->GetBenchmarkMutex());
if (manager_->results.has_error_ == false) {
if (manager_->results.has_error_ == false)
{
manager_->results.error_message_ = msg;
manager_->results.has_error_ = true;
}
}
total_iterations_ = 0;
if (timer_->running()) timer_->StopTimer();
if (timer_->running())
timer_->StopTimer();
}
void State::SetIterationTime(double seconds) {
void State::SetIterationTime(double seconds)
{
timer_->SetIterationTime(seconds);
}
void State::SetLabel(const char* label) {
void State::SetLabel(const char *label)
{
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
}
void State::StartKeepRunning() {
void State::StartKeepRunning()
{
CHECK(!started_ && !finished_);
started_ = true;
total_iterations_ = error_occurred_ ? 0 : max_iterations;
manager_->StartStopBarrier();
if (!error_occurred_) ResumeTiming();
if (!error_occurred_)
ResumeTiming();
}
void State::FinishKeepRunning() {
void State::FinishKeepRunning()
{
CHECK(started_ && (!finished_ || error_occurred_));
if (!error_occurred_) {
if (!error_occurred_)
{
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
@ -212,12 +215,14 @@ void State::FinishKeepRunning() {
manager_->StartStopBarrier();
}
namespace internal {
namespace {
namespace internal
{
namespace
{
void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
void RunBenchmarks(const std::vector<BenchmarkInstance> &benchmarks, BenchmarkReporter *display_reporter,
BenchmarkReporter *file_reporter)
{
// Note the file_reporter can be null.
CHECK(display_reporter != nullptr);
@ -225,15 +230,16 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
for (const BenchmarkInstance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.str().size());
for (const BenchmarkInstance &benchmark : benchmarks)
{
name_field_width = std::max<size_t>(name_field_width, benchmark.name.str().size());
might_have_aggregates |= benchmark.repetitions > 1;
for (const auto &Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
if (might_have_aggregates) name_field_width += 1 + stat_field_width;
if (might_have_aggregates)
name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
@ -245,21 +251,22 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
// We flush streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
auto flushStreams = [](BenchmarkReporter *reporter) {
if (!reporter) return;
if (!reporter)
return;
std::flush(reporter->GetOutputStream());
std::flush(reporter->GetErrorStream());
};
if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
if (display_reporter->ReportContext(context) && (!file_reporter || file_reporter->ReportContext(context)))
{
flushStreams(display_reporter);
flushStreams(file_reporter);
for (const auto& benchmark : benchmarks) {
for (const auto &benchmark : benchmarks)
{
RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
auto report = [&run_results](BenchmarkReporter* reporter,
bool report_aggregates_only) {
auto report = [&run_results](BenchmarkReporter *reporter, bool report_aggregates_only) {
assert(reporter);
// If there are no aggregates, do output non-aggregates.
report_aggregates_only &= !run_results.aggregates_only.empty();
@ -278,7 +285,8 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
}
}
display_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
if (file_reporter)
file_reporter->Finalize();
flushStreams(display_reporter);
flushStreams(file_reporter);
}
@ -290,16 +298,23 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::unique_ptr<BenchmarkReporter> CreateReporter(
std::string const& name, ConsoleReporter::OutputOptions output_opts) {
std::unique_ptr<BenchmarkReporter> CreateReporter(std::string const &name, ConsoleReporter::OutputOptions output_opts)
{
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console") {
if (name == "console")
{
return PtrType(new ConsoleReporter(output_opts));
} else if (name == "json") {
}
else if (name == "json")
{
return PtrType(new JSONReporter);
} else if (name == "csv") {
}
else if (name == "csv")
{
return PtrType(new CSVReporter);
} else {
}
else
{
std::cerr << "Unexpected format: '" << name << "'\n";
std::exit(1);
}
@ -311,29 +326,39 @@ std::unique_ptr<BenchmarkReporter> CreateReporter(
} // end namespace
bool IsZero(double n) {
bool IsZero(double n)
{
return std::abs(n) < std::numeric_limits<double>::epsilon();
}
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color)
{
int output_opts = ConsoleReporter::OO_Defaults;
auto is_benchmark_color = [force_no_color]() -> bool {
if (force_no_color) {
if (force_no_color)
{
return false;
}
if (FLAGS_benchmark_color == "auto") {
if (FLAGS_benchmark_color == "auto")
{
return IsColorTerminal();
}
return IsTruthyFlagValue(FLAGS_benchmark_color);
};
if (is_benchmark_color()) {
if (is_benchmark_color())
{
output_opts |= ConsoleReporter::OO_Color;
} else {
}
else
{
output_opts &= ~ConsoleReporter::OO_Color;
}
if (FLAGS_benchmark_counters_tabular) {
if (FLAGS_benchmark_counters_tabular)
{
output_opts |= ConsoleReporter::OO_Tabular;
} else {
}
else
{
output_opts &= ~ConsoleReporter::OO_Tabular;
}
return static_cast<ConsoleReporter::OutputOptions>(output_opts);
@ -341,16 +366,18 @@ ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
} // end namespace internal
size_t RunSpecifiedBenchmarks() {
size_t RunSpecifiedBenchmarks()
{
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter)
{
return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter, BenchmarkReporter *file_reporter)
{
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
spec = "."; // Regexp that matches all benchmarks
@ -359,30 +386,33 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter) {
default_display_reporter = internal::CreateReporter(
FLAGS_benchmark_format, internal::GetOutputOptions());
if (!display_reporter)
{
default_display_reporter = internal::CreateReporter(FLAGS_benchmark_format, internal::GetOutputOptions());
display_reporter = default_display_reporter.get();
}
auto &Out = display_reporter->GetOutputStream();
auto &Err = display_reporter->GetErrorStream();
std::string const &fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
if (fname.empty() && file_reporter)
{
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
std::exit(1);
}
if (!fname.empty()) {
if (!fname.empty())
{
output_file.open(fname);
if (!output_file.is_open()) {
if (!output_file.is_open())
{
Err << "invalid file name: '" << fname << std::endl;
std::exit(1);
}
if (!file_reporter) {
default_file_reporter = internal::CreateReporter(
FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
if (!file_reporter)
{
default_file_reporter = internal::CreateReporter(FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
}
file_reporter->SetOutputStream(&output_file);
@ -390,32 +420,39 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
}
std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err))
return 0;
if (benchmarks.empty()) {
if (benchmarks.empty())
{
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
return 0;
}
if (FLAGS_benchmark_list_tests) {
if (FLAGS_benchmark_list_tests)
{
for (auto const &benchmark : benchmarks)
Out << benchmark.name.str() << "\n";
} else {
}
else
{
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
void RegisterMemoryManager(MemoryManager* manager) {
void RegisterMemoryManager(MemoryManager *manager)
{
internal::memory_manager = manager;
}
namespace internal {
namespace internal
{
void PrintUsageAndExit() {
fprintf(stdout,
"benchmark"
void PrintUsageAndExit()
{
fprintf(stdout, "benchmark"
" [--benchmark_list_tests={true|false}]\n"
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
@ -431,67 +468,69 @@ void PrintUsageAndExit() {
exit(0);
}
void ParseCommandLineFlags(int* argc, char** argv) {
void ParseCommandLineFlags(int *argc, char **argv)
{
using namespace benchmark;
BenchmarkReporter::Context::executable_name =
(argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
BenchmarkReporter::Context::executable_name = (argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i)
{
if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
ParseDoubleFlag(argv[i], "benchmark_min_time",
&FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
&FLAGS_benchmark_display_aggregates_only) ||
ParseDoubleFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", &FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", &FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format",
&FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
// "color_print" is the deprecated name for "benchmark_color".
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular",
&FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
ParseBoolFlag(argv[i], "benchmark_counters_tabular", &FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v))
{
for (int j = i; j != *argc - 1; ++j)
argv[j] = argv[j + 1];
--(*argc);
--i;
} else if (IsFlag(argv[i], "help")) {
}
else if (IsFlag(argv[i], "help"))
{
PrintUsageAndExit();
}
}
for (auto const* flag :
{&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv") {
for (auto const *flag : {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv")
{
PrintUsageAndExit();
}
if (FLAGS_benchmark_color.empty()) {
if (FLAGS_benchmark_color.empty())
{
PrintUsageAndExit();
}
}
int InitializeStreams() {
int InitializeStreams()
{
static std::ios_base::Init init;
return 0;
}
} // end namespace internal
void Initialize(int* argc, char** argv) {
void Initialize(int *argc, char **argv)
{
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
bool ReportUnrecognizedArguments(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
argv[i]);
bool ReportUnrecognizedArguments(int argc, char **argv)
{
for (int i = 1; i < argc; ++i)
{
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
}
return argc > 1;
}


@ -1,15 +1,17 @@
#include "benchmark_api_internal.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
State BenchmarkInstance::Run(IterationCount iters, int thread_id,
internal::ThreadTimer* timer,
internal::ThreadManager* manager) const {
State BenchmarkInstance::Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
internal::ThreadManager *manager) const
{
State st(iters, arg, thread_id, threads, timer, manager);
benchmark->Run(st);
return st;
}
} // internal
} // benchmark
} // namespace internal
} // namespace benchmark


@ -11,11 +11,14 @@
#include <string>
#include <vector>
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
// Information kept per benchmark we may want to run
struct BenchmarkInstance {
struct BenchmarkInstance
{
BenchmarkName name;
Benchmark *benchmark;
AggregationReportMode aggregation_report_mode;
@ -39,9 +42,7 @@ struct BenchmarkInstance {
internal::ThreadManager *manager) const;
};
bool FindBenchmarksInternal(const std::string& re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
bool IsZero(double n);


@ -14,26 +14,34 @@
#include <benchmark/benchmark.h>
namespace benchmark {
namespace benchmark
{
namespace {
namespace
{
// Compute the total size of a pack of std::strings
size_t size_impl() { return 0; }
size_t size_impl()
{
return 0;
}
template <typename Head, typename... Tail>
size_t size_impl(const Head& head, const Tail&... tail) {
template <typename Head, typename... Tail> size_t size_impl(const Head &head, const Tail &...tail)
{
return head.size() + size_impl(tail...);
}
// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string&, char) {}
void join_impl(std::string &, char)
{
}
template <typename Head, typename... Tail>
void join_impl(std::string& s, const char delimiter, const Head& head,
const Tail&... tail) {
if (!s.empty() && !head.empty()) {
void join_impl(std::string &s, const char delimiter, const Head &head, const Tail &...tail)
{
if (!s.empty() && !head.empty())
{
s += delimiter;
}
@ -42,8 +50,8 @@ void join_impl(std::string& s, const char delimiter, const Head& head,
join_impl(s, delimiter, tail...);
}
template <typename... Ts>
std::string join(char delimiter, const Ts&... ts) {
template <typename... Ts> std::string join(char delimiter, const Ts &...ts)
{
std::string s;
s.reserve(sizeof...(Ts) + size_impl(ts...));
join_impl(s, delimiter, ts...);
@ -51,8 +59,8 @@ std::string join(char delimiter, const Ts&... ts) {
}
} // namespace
std::string BenchmarkName::str() const {
return join('/', function_name, args, min_time, iterations, repetitions,
time_type, threads);
std::string BenchmarkName::str() const
{
return join('/', function_name, args, min_time, iterations, repetitions, time_type, threads);
}
} // namespace benchmark


@ -52,9 +52,11 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace benchmark
{
namespace {
namespace
{
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8;
// The size of a benchmark family determines the number of inputs to repeat
@ -62,7 +64,8 @@ static const int kRangeMultiplier = 8;
static const size_t kMaxFamilySize = 100;
} // end namespace
namespace internal {
namespace internal
{
//=============================================================================//
// BenchmarkFamilies
@ -70,7 +73,8 @@ namespace internal {
// Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies {
class BenchmarkFamilies
{
public:
static BenchmarkFamilies *GetInstance();
@ -82,49 +86,54 @@ class BenchmarkFamilies {
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
bool FindBenchmarks(std::string re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
private:
BenchmarkFamilies() {}
BenchmarkFamilies()
{
}
std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_;
};
BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
BenchmarkFamilies *BenchmarkFamilies::GetInstance()
{
static BenchmarkFamilies instance;
return &instance;
}
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family)
{
MutexLock l(mutex_);
size_t index = families_.size();
families_.push_back(std::move(family));
return index;
}
void BenchmarkFamilies::ClearBenchmarks() {
void BenchmarkFamilies::ClearBenchmarks()
{
MutexLock l(mutex_);
families_.clear();
families_.shrink_to_fit();
}
bool BenchmarkFamilies::FindBenchmarks(
std::string spec, std::vector<BenchmarkInstance>* benchmarks,
std::ostream* ErrStream) {
bool BenchmarkFamilies::FindBenchmarks(std::string spec, std::vector<BenchmarkInstance> *benchmarks,
std::ostream *ErrStream)
{
CHECK(ErrStream);
auto &Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
if (spec[0] == '-') {
if (spec[0] == '-')
{
spec.replace(0, 1, "");
isNegativeFilter = true;
}
if (!re.Init(spec, &error_msg)) {
if (!re.Init(spec, &error_msg))
{
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
@ -133,30 +142,36 @@ bool BenchmarkFamilies::FindBenchmarks(
const std::vector<int> one_thread = {1};
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark>& family : families_) {
for (std::unique_ptr<Benchmark> &family : families_)
{
// Family was deleted or benchmark doesn't match
if (!family) continue;
if (!family)
continue;
if (family->ArgsCnt() == -1) {
if (family->ArgsCnt() == -1)
{
family->Args({});
}
const std::vector<int> *thread_counts =
(family->thread_counts_.empty()
? &one_thread
(family->thread_counts_.empty() ? &one_thread
: &static_cast<const std::vector<int> &>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run at least 'family_size' different inputs.
// If 'family_size' is very large warn the user.
if (family_size > kMaxFamilySize) {
Err << "The number of inputs is very large. " << family->name_
<< " will be repeated at least " << family_size << " times.\n";
if (family_size > kMaxFamilySize)
{
Err << "The number of inputs is very large. " << family->name_ << " will be repeated at least "
<< family_size << " times.\n";
}
// reserve in the special case the regex ".", since we know the final
// family size.
if (spec == ".") benchmarks->reserve(family_size);
if (spec == ".")
benchmarks->reserve(family_size);
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
for (auto const &args : family->args_)
{
for (int num_threads : *thread_counts)
{
BenchmarkInstance instance;
instance.name.function_name = family->name_;
instance.benchmark = family.get();
@ -177,14 +192,18 @@ bool BenchmarkFamilies::FindBenchmarks(
// Add arguments to instance name
size_t arg_i = 0;
for (auto const& arg : args) {
if (!instance.name.args.empty()) {
for (auto const &arg : args)
{
if (!instance.name.args.empty())
{
instance.name.args += '/';
}
if (arg_i < family->arg_names_.size()) {
if (arg_i < family->arg_names_.size())
{
const auto &arg_name = family->arg_names_[arg_i];
if (!arg_name.empty()) {
if (!arg_name.empty())
{
instance.name.args += StrFormat("%s:", arg_name.c_str());
}
}
@ -194,41 +213,46 @@ bool BenchmarkFamilies::FindBenchmarks(
}
if (!IsZero(family->min_time_))
instance.name.min_time =
StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0) {
instance.name.min_time = StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0)
{
instance.name.iterations =
StrFormat("iterations:%lu",
static_cast<unsigned long>(family->iterations_));
StrFormat("iterations:%lu", static_cast<unsigned long>(family->iterations_));
}
if (family->repetitions_ != 0)
instance.name.repetitions =
StrFormat("repeats:%d", family->repetitions_);
instance.name.repetitions = StrFormat("repeats:%d", family->repetitions_);
if (family->measure_process_cpu_time_) {
if (family->measure_process_cpu_time_)
{
instance.name.time_type = "process_time";
}
if (family->use_manual_time_) {
if (!instance.name.time_type.empty()) {
if (family->use_manual_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "manual_time";
} else if (family->use_real_time_) {
if (!instance.name.time_type.empty()) {
}
else if (family->use_real_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty()) {
if (!family->thread_counts_.empty())
{
instance.name.threads = StrFormat("threads:%d", instance.threads);
}
const auto full_name = instance.name.str();
if ((re.Match(full_name) && !isNegativeFilter) ||
(!re.Match(full_name) && isNegativeFilter)) {
if ((re.Match(full_name) && !isNegativeFilter) || (!re.Match(full_name) && isNegativeFilter))
{
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
@ -238,7 +262,8 @@ bool BenchmarkFamilies::FindBenchmarks(
return true;
}
Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
Benchmark *RegisterBenchmarkInternal(Benchmark *bench)
{
std::unique_ptr<Benchmark> bench_ptr(bench);
BenchmarkFamilies *families = BenchmarkFamilies::GetInstance();
families->AddBenchmark(std::move(bench_ptr));
@ -247,9 +272,8 @@ Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err) {
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err)
{
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
@ -258,72 +282,75 @@ bool FindBenchmarksInternal(const std::string& re,
//=============================================================================//
Benchmark::Benchmark(const char *name)
: name_(name),
aggregation_report_mode_(ARM_Unspecified),
time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier),
min_time_(0),
iterations_(0),
repetitions_(0),
measure_process_cpu_time_(false),
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
complexity_lambda_(nullptr) {
: name_(name), aggregation_report_mode_(ARM_Unspecified), time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier), min_time_(0), iterations_(0), repetitions_(0),
measure_process_cpu_time_(false), use_real_time_(false), use_manual_time_(false), complexity_(oNone),
complexity_lambda_(nullptr)
{
ComputeStatistics("mean", StatisticsMean);
ComputeStatistics("median", StatisticsMedian);
ComputeStatistics("stddev", StatisticsStdDev);
}
Benchmark::~Benchmark() {}
Benchmark::~Benchmark()
{
}
Benchmark* Benchmark::Arg(int64_t x) {
Benchmark *Benchmark::Arg(int64_t x)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
}
Benchmark* Benchmark::Unit(TimeUnit unit) {
Benchmark *Benchmark::Unit(TimeUnit unit)
{
time_unit_ = unit;
return this;
}
Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
Benchmark *Benchmark::Range(int64_t start, int64_t limit)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int64_t> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int64_t i : arglist) {
for (int64_t i : arglist)
{
args_.push_back({i});
}
return this;
}
Benchmark* Benchmark::Ranges(
const std::vector<std::pair<int64_t, int64_t>>& ranges) {
Benchmark *Benchmark::Ranges(const std::vector<std::pair<int64_t, int64_t>> &ranges)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
range_multiplier_);
for (std::size_t i = 0; i < ranges.size(); i++)
{
AddRange(&arglists[i], ranges[i].first, ranges[i].second, range_multiplier_);
total *= arglists[i].size();
}
std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t i = 0; i < total; i++) {
for (std::size_t i = 0; i < total; i++)
{
std::vector<int64_t> tmp;
tmp.reserve(arglists.size());
for (std::size_t j = 0; j < arglists.size(); j++) {
for (std::size_t j = 0; j < arglists.size(); j++)
{
tmp.push_back(arglists[j].at(ctr[j]));
}
args_.push_back(std::move(tmp));
for (std::size_t j = 0; j < arglists.size(); j++) {
if (ctr[j] + 1 < arglists[j].size()) {
for (std::size_t j = 0; j < arglists.size(); j++)
{
if (ctr[j] + 1 < arglists[j].size())
{
++ctr[j];
break;
}
@ -333,129 +360,148 @@ Benchmark* Benchmark::Ranges(
return this;
}
Benchmark* Benchmark::ArgName(const std::string& name) {
Benchmark *Benchmark::ArgName(const std::string &name)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
arg_names_ = {name};
return this;
}
Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
Benchmark *Benchmark::ArgNames(const std::vector<std::string> &names)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
arg_names_ = names;
return this;
}
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
Benchmark *Benchmark::DenseRange(int64_t start, int64_t limit, int step)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step) {
for (int64_t arg = start; arg <= limit; arg += step)
{
args_.push_back({arg});
}
return this;
}
Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
Benchmark *Benchmark::Args(const std::vector<int64_t> &args)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
}
Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
Benchmark *Benchmark::Apply(void (*custom_arguments)(Benchmark *benchmark))
{
custom_arguments(this);
return this;
}
Benchmark* Benchmark::RangeMultiplier(int multiplier) {
Benchmark *Benchmark::RangeMultiplier(int multiplier)
{
CHECK(multiplier > 1);
range_multiplier_ = multiplier;
return this;
}
Benchmark* Benchmark::MinTime(double t) {
Benchmark *Benchmark::MinTime(double t)
{
CHECK(t > 0.0);
CHECK(iterations_ == 0);
min_time_ = t;
return this;
}
Benchmark* Benchmark::Iterations(IterationCount n) {
Benchmark *Benchmark::Iterations(IterationCount n)
{
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
return this;
}
Benchmark* Benchmark::Repetitions(int n) {
Benchmark *Benchmark::Repetitions(int n)
{
CHECK(n > 0);
repetitions_ = n;
return this;
}
Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
Benchmark *Benchmark::ReportAggregatesOnly(bool value)
{
aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
return this;
}
Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
Benchmark *Benchmark::DisplayAggregatesOnly(bool value)
{
// If we were called, the report mode is no longer 'unspecified', in any case.
aggregation_report_mode_ = static_cast<AggregationReportMode>(
aggregation_report_mode_ | ARM_Default);
aggregation_report_mode_ = static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_Default);
if (value) {
aggregation_report_mode_ = static_cast<AggregationReportMode>(
aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
} else {
aggregation_report_mode_ = static_cast<AggregationReportMode>(
aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
if (value)
{
aggregation_report_mode_ =
static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
}
else
{
aggregation_report_mode_ =
static_cast<AggregationReportMode>(aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
}
return this;
}
Benchmark* Benchmark::MeasureProcessCPUTime() {
Benchmark *Benchmark::MeasureProcessCPUTime()
{
// Can be used together with UseRealTime() / UseManualTime().
measure_process_cpu_time_ = true;
return this;
}
Benchmark* Benchmark::UseRealTime() {
CHECK(!use_manual_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
Benchmark *Benchmark::UseRealTime()
{
CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
return this;
}
Benchmark* Benchmark::UseManualTime() {
CHECK(!use_real_time_)
<< "Cannot set UseRealTime and UseManualTime simultaneously.";
Benchmark *Benchmark::UseManualTime()
{
CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_manual_time_ = true;
return this;
}
Benchmark* Benchmark::Complexity(BigO complexity) {
Benchmark *Benchmark::Complexity(BigO complexity)
{
complexity_ = complexity;
return this;
}
Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
Benchmark *Benchmark::Complexity(BigOFunc *complexity)
{
complexity_lambda_ = complexity;
complexity_ = oLambda;
return this;
}
Benchmark* Benchmark::ComputeStatistics(std::string name,
StatisticsFunc* statistics) {
Benchmark *Benchmark::ComputeStatistics(std::string name, StatisticsFunc *statistics)
{
statistics_.emplace_back(name, statistics);
return this;
}
Benchmark* Benchmark::Threads(int t) {
Benchmark *Benchmark::Threads(int t)
{
CHECK_GT(t, 0);
thread_counts_.push_back(t);
return this;
}
Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
Benchmark *Benchmark::ThreadRange(int min_threads, int max_threads)
{
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
@ -463,29 +509,37 @@ Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
return this;
}
Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
int stride) {
Benchmark *Benchmark::DenseThreadRange(int min_threads, int max_threads, int stride)
{
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
CHECK_GE(stride, 1);
for (auto i = min_threads; i < max_threads; i += stride) {
for (auto i = min_threads; i < max_threads; i += stride)
{
thread_counts_.push_back(i);
}
thread_counts_.push_back(max_threads);
return this;
}
Benchmark* Benchmark::ThreadPerCpu() {
Benchmark *Benchmark::ThreadPerCpu()
{
thread_counts_.push_back(CPUInfo::Get().num_cpus);
return this;
}
void Benchmark::SetName(const char* name) { name_ = name; }
void Benchmark::SetName(const char *name)
{
name_ = name;
}
int Benchmark::ArgsCnt() const {
if (args_.empty()) {
if (arg_names_.empty()) return -1;
int Benchmark::ArgsCnt() const
{
if (args_.empty())
{
if (arg_names_.empty())
return -1;
return static_cast<int>(arg_names_.size());
}
return static_cast<int>(args_.front().size());
@ -495,11 +549,15 @@ int Benchmark::ArgsCnt() const {
// FunctionBenchmark
//=============================================================================//
void FunctionBenchmark::Run(State& st) { func_(st); }
void FunctionBenchmark::Run(State &st)
{
func_(st);
}
} // end namespace internal
void ClearRegisteredBenchmarks() {
void ClearRegisteredBenchmarks()
{
internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}


@ -5,14 +5,15 @@
#include "check.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
// Append the powers of 'mult' in the closed interval [lo, hi].
// Returns iterator to the start of the inserted range.
template <typename T>
typename std::vector<T>::iterator
AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
template <typename T> typename std::vector<T>::iterator AddPowers(std::vector<T> *dst, T lo, T hi, int mult)
{
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
@ -22,20 +23,23 @@ AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult"
for (T i = 1; i <= hi; i *= mult) {
if (i >= lo) {
for (T i = 1; i <= hi; i *= mult)
{
if (i >= lo)
{
dst->push_back(i);
}
// Break the loop here since multiplying by
// 'mult' would move outside of the range of T
if (i > kmax / mult) break;
if (i > kmax / mult)
break;
}
return dst->begin() + start_offset;
}
template <typename T>
void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
template <typename T> void AddNegatedPowers(std::vector<T> *dst, T lo, T hi, int mult)
{
// We negate lo and hi so we require that they cannot be equal to 'min'.
CHECK_GT(lo, std::numeric_limits<T>::min());
CHECK_GT(hi, std::numeric_limits<T>::min());
@ -54,10 +58,9 @@ void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
std::reverse(it, dst->end());
}
template <typename T>
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
"Args type must be a signed integer");
template <typename T> void AddRange(std::vector<T> *dst, T lo, T hi, int mult)
{
static_assert(std::is_integral<T>::value && std::is_signed<T>::value, "Args type must be a signed integer");
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
@ -68,10 +71,12 @@ void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
// Handle lo == hi as a special case, so we then know
// lo < hi and so it is safe to add 1 to lo and subtract 1
// from hi without falling outside of the range of T.
if (lo == hi) return;
if (lo == hi)
return;
// Ensure that lo_inner <= hi_inner below.
if (lo + 1 == hi) {
if (lo + 1 == hi)
{
dst->push_back(hi);
return;
}
@ -81,22 +86,26 @@ void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
const auto hi_inner = static_cast<T>(hi - 1);
// Insert negative values
if (lo_inner < 0) {
if (lo_inner < 0)
{
AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
}
// Treat 0 as a special case (see discussion on #762).
if (lo <= 0 && hi >= 0) {
if (lo <= 0 && hi >= 0)
{
dst->push_back(0);
}
// Insert positive values
if (hi_inner > 0) {
if (hi_inner > 0)
{
AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
}
// Add "hi" (if different from last value).
if (hi != dst->back()) {
if (hi != dst->back())
{
dst->push_back(hi);
}
}


@ -51,22 +51,24 @@
#include "thread_manager.h"
#include "thread_timer.h"
namespace benchmark {
namespace benchmark
{
namespace internal {
namespace internal
{
MemoryManager *memory_manager = nullptr;
namespace {
namespace
{
static constexpr IterationCount kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::BenchmarkInstance& b,
const internal::ThreadManager::Result& results,
IterationCount memory_iterations,
BenchmarkReporter::Run CreateRunReport(const benchmark::internal::BenchmarkInstance &b,
const internal::ThreadManager::Result &results, IterationCount memory_iterations,
const MemoryManager::Result &memory_result, double seconds,
int64_t repetition_index) {
int64_t repetition_index)
{
// Create report about this benchmark run.
BenchmarkReporter::Run report;
@ -81,10 +83,14 @@ BenchmarkReporter::Run CreateRunReport(
report.repetition_index = repetition_index;
report.repetitions = b.repetitions;
if (!report.error_occurred) {
if (b.use_manual_time) {
if (!report.error_occurred)
{
if (b.use_manual_time)
{
report.real_accumulated_time = results.manual_time_used;
} else {
}
else
{
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
@ -94,12 +100,11 @@ BenchmarkReporter::Run CreateRunReport(
report.statistics = b.statistics;
report.counters = results.counters;
if (memory_iterations > 0) {
if (memory_iterations > 0)
{
report.has_memory_result = true;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) /
memory_iterations
: 0;
memory_iterations ? static_cast<double>(memory_result.num_allocs) / memory_iterations : 0;
report.max_bytes_used = memory_result.max_bytes_used;
}
@ -110,11 +115,9 @@ BenchmarkReporter::Run CreateRunReport(
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const BenchmarkInstance* b, IterationCount iters,
int thread_id, ThreadManager* manager) {
internal::ThreadTimer timer(
b->measure_process_cpu_time
? internal::ThreadTimer::CreateProcessCpuTime()
void RunInThread(const BenchmarkInstance *b, IterationCount iters, int thread_id, ThreadManager *manager)
{
internal::ThreadTimer timer(b->measure_process_cpu_time ? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
State st = b->Run(iters, thread_id, &timer, manager);
CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
@ -132,32 +135,30 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
manager->NotifyThreadComplete();
}
class BenchmarkRunner {
class BenchmarkRunner
{
public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance &b_,
std::vector<BenchmarkReporter::Run> *complexity_reports_)
: b(b_),
complexity_reports(*complexity_reports_),
: b(b_), complexity_reports(*complexity_reports_),
min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
repeats(b.repetitions != 0 ? b.repetitions
: FLAGS_benchmark_repetitions),
has_explicit_iteration_count(b.iterations != 0),
pool(b.threads - 1),
iters(has_explicit_iteration_count ? b.iterations : 1) {
repeats(b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions),
has_explicit_iteration_count(b.iterations != 0), pool(b.threads - 1),
iters(has_explicit_iteration_count ? b.iterations : 1)
{
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only ||
FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only =
FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode != internal::ARM_Unspecified) {
(FLAGS_benchmark_report_aggregates_only || FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only = FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode != internal::ARM_Unspecified)
{
run_results.display_report_aggregates_only =
(b.aggregation_report_mode &
internal::ARM_DisplayReportAggregatesOnly);
(b.aggregation_report_mode & internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
}
for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
for (int repetition_num = 0; repetition_num < repeats; repetition_num++)
{
DoOneRepetition(repetition_num);
}
@ -165,16 +166,19 @@ class BenchmarkRunner {
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
// Maybe calculate complexity report
if ((b.complexity != oNone) && b.last_benchmark_instance) {
if ((b.complexity != oNone) && b.last_benchmark_instance)
{
auto additional_run_stats = ComputeBigO(complexity_reports);
run_results.aggregates_only.insert(run_results.aggregates_only.end(),
additional_run_stats.begin(),
run_results.aggregates_only.insert(run_results.aggregates_only.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
}
RunResults&& get_results() { return std::move(run_results); }
RunResults &&get_results()
{
return std::move(run_results);
}
private:
RunResults run_results;
@ -192,21 +196,23 @@ class BenchmarkRunner {
// So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count.
struct IterationResults {
struct IterationResults
{
internal::ThreadManager::Result results;
IterationCount iters;
double seconds;
};
IterationResults DoNIterations() {
IterationResults DoNIterations()
{
VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(b.threads));
// Run all but one thread in separate threads
for (std::size_t ti = 0; ti < pool.size(); ++ti) {
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
manager.get());
for (std::size_t ti = 0; ti < pool.size(); ++ti)
{
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1), manager.get());
}
// And run one thread here directly.
// (If we were asked to run just one thread, we don't create new threads.)
@ -215,7 +221,8 @@ class BenchmarkRunner {
// The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads();
for (std::thread& thread : pool) thread.join();
for (std::thread &thread : pool)
thread.join();
IterationResults i;
// Acquire the measurements/counters from the manager, UNDER THE LOCK!
@ -231,25 +238,29 @@ class BenchmarkRunner {
i.results.real_time_used /= b.threads;
i.results.manual_time_used /= b.threads;
// If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
if (b.measure_process_cpu_time)
i.results.cpu_time_used /= b.threads;
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
<< i.results.real_time_used << "\n";
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" << i.results.real_time_used << "\n";
// So for how long were we running?
i.iters = iters;
// Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used;
if (b.use_manual_time) {
if (b.use_manual_time)
{
i.seconds = i.results.manual_time_used;
} else if (b.use_real_time) {
}
else if (b.use_real_time)
{
i.seconds = i.results.real_time_used;
}
return i;
}
IterationCount PredictNumItersNeeded(const IterationResults& i) const {
IterationCount PredictNumItersNeeded(const IterationResults &i) const
{
// See by how much the iteration count should be increased.
// Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
@ -260,12 +271,12 @@ class BenchmarkRunner {
// expansion should be 14x.
bool is_significant = (i.seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0) multiplier = 2.0;
if (multiplier <= 1.0)
multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>(
std::lround(std::max(multiplier * static_cast<double>(i.iters),
static_cast<double>(i.iters) + 1.0)));
std::lround(std::max(multiplier * static_cast<double>(i.iters), static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits, though...
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
@ -273,12 +284,12 @@ class BenchmarkRunner {
return next_iters; // round up before conversion to integer.
}
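For reference, a minimal standalone sketch of the growth rule implemented above. kMaxIterations stands in for the library's iteration cap, and the sample figures in main() are illustrative only; the 1.4 overshoot factor and the 10x cap for insignificant runs mirror the surrounding code.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
using IterationCount = int64_t;
static const IterationCount kMaxIterations = 1000000000;  // stand-in cap
IterationCount PredictNumItersNeeded(double seconds, IterationCount iters, double min_time)
{
    // Aim 40% past min_time; avoid division by zero with max(seconds, 1ns).
    double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
    // If the run covered under 10% of min_time, grow by at most 10x per step.
    const bool is_significant = (seconds / min_time) > 0.1;
    multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
    if (multiplier <= 1.0)
        multiplier = 2.0;  // always make forward progress
    const IterationCount max_next_iters = static_cast<IterationCount>(
        std::lround(std::max(multiplier * static_cast<double>(iters), static_cast<double>(iters) + 1.0)));
    return std::min(max_next_iters, kMaxIterations);
}
int main()
{
    // 1 iteration took 1us against a 0.5s budget: insignificant, capped at 10x.
    std::printf("%lld\n", (long long)PredictNumItersNeeded(1e-6, 1, 0.5));     // 10
    // 10000 iterations took 0.1s: significant, 0.5 * 1.4 / 0.1 = 7x growth.
    std::printf("%lld\n", (long long)PredictNumItersNeeded(0.1, 10000, 0.5));  // 70000
}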
bool ShouldReportIterationResults(const IterationResults& i) const {
bool ShouldReportIterationResults(const IterationResults &i) const
{
// Determine if this run should be reported:
// either because it has run for a sufficient amount of time
// or because an error was reported.
return i.results.has_error_ ||
i.iters >= kMaxIterations || // Too many iterations already.
return i.results.has_error_ || i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
@ -286,7 +297,8 @@ class BenchmarkRunner {
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
}
void DoOneRepetition(int64_t repetition_index) {
void DoOneRepetition(int64_t repetition_index)
{
const bool is_the_first_repetition = repetition_index == 0;
IterationResults i;
@ -296,7 +308,8 @@ class BenchmarkRunner {
// Please do note that if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and other repetitions
// simply use that precomputed iteration count.
for (;;) {
for (;;)
{
i = DoNIterations();
// Do we consider the results to be significant?
@ -304,25 +317,25 @@ class BenchmarkRunner {
// it has calculated the correct iteration time, so we have run that very
// iteration count just now. No need to calculate anything. Just report.
// Else, the normal rules apply.
const bool results_are_significant = !is_the_first_repetition ||
has_explicit_iteration_count ||
ShouldReportIterationResults(i);
const bool results_are_significant =
!is_the_first_repetition || has_explicit_iteration_count || ShouldReportIterationResults(i);
if (results_are_significant) break; // Good, let's report them!
if (results_are_significant)
break; // Good, let's report them!
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient
// iteration count, and run the benchmark again...
iters = PredictNumItersNeeded(i);
assert(iters > i.iters &&
"if we did more iterations than we want to do the next time, "
assert(iters > i.iters && "if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run.");
}
// Oh, one last thing: we also need to produce the 'memory measurements'...
MemoryManager::Result memory_result;
IterationCount memory_iterations = 0;
if (memory_manager != nullptr) {
if (memory_manager != nullptr)
{
// Only run a few iterations to reduce the impact of one-time
// allocations in benchmarks that are not properly managed.
memory_iterations = std::min<IterationCount>(16, iters);
@ -338,8 +351,7 @@ class BenchmarkRunner {
// OK, now actually report.
BenchmarkReporter::Run report =
CreateRunReport(b, i.results, memory_iterations, memory_result,
i.seconds, repetition_index);
CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, repetition_index);
if (!report.error_occurred && b.complexity != oNone)
complexity_reports.push_back(report);
@ -350,9 +362,9 @@ class BenchmarkRunner {
} // end namespace
RunResults RunBenchmark(
const benchmark::internal::BenchmarkInstance& b,
std::vector<BenchmarkReporter::Run>* complexity_reports) {
RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
std::vector<BenchmarkReporter::Run> *complexity_reports)
{
internal::BenchmarkRunner r(b, complexity_reports);
return r.get_results();
}

View File

@ -26,13 +26,16 @@ DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only);
namespace benchmark {
namespace benchmark
{
namespace internal {
namespace internal
{
extern MemoryManager *memory_manager;
struct RunResults {
struct RunResults
{
std::vector<BenchmarkReporter::Run> non_aggregates;
std::vector<BenchmarkReporter::Run> aggregates_only;
@ -40,8 +43,7 @@ struct RunResults {
bool file_report_aggregates_only = false;
};
RunResults RunBenchmark(
const benchmark::internal::BenchmarkInstance& b,
RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
std::vector<BenchmarkReporter::Run> *complexity_reports);
} // namespace internal

View File

@ -8,34 +8,42 @@
#include "internal_macros.h"
#include "log.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
typedef void(AbortHandlerT)();
inline AbortHandlerT*& GetAbortHandler() {
inline AbortHandlerT *&GetAbortHandler()
{
static AbortHandlerT *handler = &std::abort;
return handler;
}
BENCHMARK_NORETURN inline void CallAbortHandler() {
BENCHMARK_NORETURN inline void CallAbortHandler()
{
GetAbortHandler()();
std::abort(); // fallback to enforce noreturn
}
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed.
class CheckHandler {
class CheckHandler
{
public:
CheckHandler(const char* check, const char* file, const char* func, int line)
: log_(GetErrorLogInstance()) {
log_ << file << ":" << line << ": " << func << ": Check `" << check
<< "' failed. ";
CheckHandler(const char *check, const char *file, const char *func, int line) : log_(GetErrorLogInstance())
{
log_ << file << ":" << line << ": " << func << ": Check `" << check << "' failed. ";
}
LogType& GetLog() { return log_; }
LogType &GetLog()
{
return log_;
}
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false)
{
log_ << std::endl;
CallAbortHandler();
}
@ -56,8 +64,7 @@ class CheckHandler {
#ifndef NDEBUG
#define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
.GetLog())
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__).GetLog())
#else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif
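A minimal standalone sketch of the pattern behind this macro: the failing branch constructs a temporary handler, extra context is streamed into the log it returns, and the handler's destructor flushes and aborts once the full expression has been evaluated. All names here are illustrative, not the library's.
#include <cstdlib>
#include <iostream>
class MiniCheckHandler
{
  public:
    MiniCheckHandler(const char *check, const char *file, int line)
    {
        std::cerr << file << ":" << line << ": Check `" << check << "' failed. ";
    }
    std::ostream &GetLog()
    {
        return std::cerr;
    }
    ~MiniCheckHandler()
    {
        std::cerr << std::endl;
        std::abort();  // the temporary dies at the end of the full expression
    }
};
#define MINI_CHECK(b) if (b) {} else MiniCheckHandler(#b, __FILE__, __LINE__).GetLog()
int main(int argc, char **)
{
    MINI_CHECK(argc > 0) << "argc was " << argc;  // passes, logs nothing
    MINI_CHECK(argc < 0) << "argc was " << argc;  // fails: logs context, then aborts
}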

View File

@ -25,23 +25,27 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <windows.h>
#include <io.h>
#include <windows.h>
#else
#include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
#ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode;
#else
typedef const char *PlatformColorCode;
#endif
PlatformColorCode GetPlatformColorCode(LogColor color) {
PlatformColorCode GetPlatformColorCode(LogColor color)
{
#ifdef BENCHMARK_OS_WINDOWS
switch (color) {
switch (color)
{
case COLOR_RED:
return FOREGROUND_RED;
case COLOR_GREEN:
@ -59,7 +63,8 @@ PlatformColorCode GetPlatformColorCode(LogColor color) {
return 0;
}
#else
switch (color) {
switch (color)
{
case COLOR_RED:
return "1";
case COLOR_GREEN:
@ -82,7 +87,8 @@ PlatformColorCode GetPlatformColorCode(LogColor color) {
} // end namespace
std::string FormatString(const char* msg, va_list args) {
std::string FormatString(const char *msg, va_list args)
{
// We might need a second shot at this, so pre-emptively make a copy.
va_list args_cp;
va_copy(args_cp, args);
@ -100,7 +106,8 @@ std::string FormatString(const char* msg, va_list args) {
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else {
else
{
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
@ -110,7 +117,8 @@ std::string FormatString(const char* msg, va_list args) {
}
}
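The two-pass dance above is the standard grow-and-retry vsnprintf idiom. A self-contained sketch, with an illustrative buffer size and names:
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <memory>
#include <string>
std::string SketchFormat(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    va_list args_cp;  // vsnprintf consumes the va_list, so copy it up front
    va_copy(args_cp, args);
    char local_buff[256];
    const int ret = std::vsnprintf(local_buff, sizeof(local_buff), fmt, args_cp);
    va_end(args_cp);
    std::string out;
    if (ret < 0)
    {
        // formatting error: fall through and return an empty string
    }
    else if (static_cast<std::size_t>(ret) < sizeof(local_buff))
    {
        out = local_buff;  // the first attempt fit
    }
    else
    {
        // Too small: ret is the required length, excluding the trailing '\0'.
        const std::size_t size = static_cast<std::size_t>(ret) + 1;
        std::unique_ptr<char[]> buff(new char[size]);
        std::vsnprintf(buff.get(), size, fmt, args);
        out = buff.get();
    }
    va_end(args);
    return out;
}
int main()
{
    std::printf("%s\n", SketchFormat("%d %s", 42, "iterations").c_str());  // 42 iterations
}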
std::string FormatString(const char* msg, ...) {
std::string FormatString(const char *msg, ...)
{
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
@ -118,15 +126,16 @@ std::string FormatString(const char* msg, ...) {
return tmp;
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args) {
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args)
{
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
@ -141,8 +150,7 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle,
GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
SetConsoleTextAttribute(stdout_handle, GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
@ -150,12 +158,14 @@ void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char *color_code = GetPlatformColorCode(color);
if (color_code) out << FormatString("\033[0;3%sm", color_code);
if (color_code)
out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal() {
bool IsColorTerminal()
{
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
@ -165,17 +175,17 @@ bool IsColorTerminal() {
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char *const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color",
"screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color",
"linux", "cygwin",
"xterm", "xterm-color", "xterm-256color", "screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", "linux", "cygwin",
};
const char *const term = getenv("TERM");
bool term_supports_color = false;
for (const char* candidate : SUPPORTED_TERM_VALUES) {
if (term && 0 == strcmp(term, candidate)) {
for (const char *candidate : SUPPORTED_TERM_VALUES)
{
if (term && 0 == strcmp(term, candidate))
{
term_supports_color = true;
break;
}

View File

@ -5,8 +5,10 @@
#include <iostream>
#include <string>
namespace benchmark {
enum LogColor {
namespace benchmark
{
enum LogColor
{
COLOR_DEFAULT,
COLOR_RED,
COLOR_GREEN,
@ -20,8 +22,7 @@ enum LogColor {
std::string FormatString(const char *msg, va_list args);
std::string FormatString(const char *msg, ...);
void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
va_list args);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...);
// Returns true if stdout appears to be a terminal that supports colored

View File

@ -21,19 +21,23 @@
#include <iostream>
#include <limits>
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
bool ParseInt32(const std::string &src_text, const char *str, int32_t *value)
{
// Parses the string as a decimal integer.
char *end = nullptr;
const long long_value = strtol(str, &end, 10); // NOLINT
// Has strtol() consumed all characters in the string?
if (*end != '\0') {
if (*end != '\0')
{
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\".\n";
@ -42,13 +46,13 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Is the parsed value in the range of an Int32?
const int32_t result = static_cast<int32_t>(long_value);
if (long_value == std::numeric_limits<long>::max() ||
long_value == std::numeric_limits<long>::min() ||
if (long_value == std::numeric_limits<long>::max() || long_value == std::numeric_limits<long>::min() ||
// The parsed value overflows as a long. (strtol() returns
// LONG_MAX or LONG_MIN when the input overflows.)
result != long_value
// The parsed value overflows as an Int32.
) {
)
{
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", "
<< "which overflows.\n";
@ -61,13 +65,15 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string& src_text, const char* str, double* value) {
bool ParseDouble(const std::string &src_text, const char *str, double *value)
{
// Parses the string as a double.
char *end = nullptr;
const double double_value = strtod(str, &end); // NOLINT
// Has strtod() consumed all characters in the string?
if (*end != '\0') {
if (*end != '\0')
{
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a double, "
<< "but actually has value \"" << str << "\".\n";
@ -81,7 +87,8 @@ bool ParseDouble(const std::string& src_text, const char* str, double* value) {
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char* flag) {
static std::string FlagToEnvVar(const char *flag)
{
const std::string flag_str(flag);
std::string env_var;
@ -93,37 +100,39 @@ static std::string FlagToEnvVar(const char* flag) {
} // namespace
bool BoolFromEnv(const char* flag, bool default_val) {
bool BoolFromEnv(const char *flag, bool default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value_str = getenv(env_var.c_str());
return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
}
int32_t Int32FromEnv(const char* flag, int32_t default_val) {
int32_t Int32FromEnv(const char *flag, int32_t default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value_str = getenv(env_var.c_str());
int32_t value = default_val;
if (value_str == nullptr ||
!ParseInt32(std::string("Environment variable ") + env_var, value_str,
&value)) {
if (value_str == nullptr || !ParseInt32(std::string("Environment variable ") + env_var, value_str, &value))
{
return default_val;
}
return value;
}
double DoubleFromEnv(const char* flag, double default_val) {
double DoubleFromEnv(const char *flag, double default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value_str = getenv(env_var.c_str());
double value = default_val;
if (value_str == nullptr ||
!ParseDouble(std::string("Environment variable ") + env_var, value_str,
&value)) {
if (value_str == nullptr || !ParseDouble(std::string("Environment variable ") + env_var, value_str, &value))
{
return default_val;
}
return value;
}
const char* StringFromEnv(const char* flag, const char* default_val) {
const char *StringFromEnv(const char *flag, const char *default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value = getenv(env_var.c_str());
return value == nullptr ? default_val : value;
@ -134,94 +143,108 @@ const char* StringFromEnv(const char* flag, const char* default_val) {
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char* ParseFlagValue(const char* str, const char* flag,
bool def_optional) {
const char *ParseFlagValue(const char *str, const char *flag, bool def_optional)
{
// str and flag must not be nullptr.
if (str == nullptr || flag == nullptr) return nullptr;
if (str == nullptr || flag == nullptr)
return nullptr;
// The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
if (strncmp(str, flag_str.c_str(), flag_len) != 0)
return nullptr;
// Skips the flag name.
const char *flag_end = str + flag_len;
// When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0')) return flag_end;
if (def_optional && (flag_end[0] == '\0'))
return flag_end;
// If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after
// the flag name.
if (flag_end[0] != '=') return nullptr;
if (flag_end[0] != '=')
return nullptr;
// Returns the string after "=".
return flag_end + 1;
}
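A compact standalone demo of the "--flag[=value]" grammar implemented above; this ParseFlagValue is a local copy for illustration, not a new API:
#include <cstdio>
#include <cstring>
#include <string>
const char *ParseFlagValue(const char *str, const char *flag, bool def_optional)
{
    if (str == nullptr || flag == nullptr)
        return nullptr;
    const std::string flag_str = std::string("--") + flag;
    const size_t flag_len = flag_str.length();
    if (strncmp(str, flag_str.c_str(), flag_len) != 0)
        return nullptr;
    const char *flag_end = str + flag_len;
    if (def_optional && flag_end[0] == '\0')
        return flag_end;  // bare "--flag" accepted, empty value
    if (flag_end[0] != '=')
        return nullptr;
    return flag_end + 1;
}
int main()
{
    std::printf("%s\n", ParseFlagValue("--color=yes", "color", false));              // yes
    std::printf("%d\n", ParseFlagValue("--color", "color", false) == nullptr);       // 1 ('=' required here)
    std::printf("%d\n", ParseFlagValue("--colorful=1", "color", false) == nullptr);  // 1 (not an exact flag name)
}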
bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
bool ParseBoolFlag(const char *str, const char *flag, bool *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, true);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
if (value_str == nullptr)
return false;
// Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str);
return true;
}
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
bool ParseInt32Flag(const char *str, const char *flag, int32_t *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
if (value_str == nullptr)
return false;
// Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str,
value);
return ParseInt32(std::string("The value of flag --") + flag, value_str, value);
}
bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
bool ParseDoubleFlag(const char *str, const char *flag, double *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
if (value_str == nullptr)
return false;
// Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str,
value);
return ParseDouble(std::string("The value of flag --") + flag, value_str, value);
}
bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
bool ParseStringFlag(const char *str, const char *flag, std::string *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
if (value_str == nullptr)
return false;
*value = value_str;
return true;
}
bool IsFlag(const char* str, const char* flag) {
bool IsFlag(const char *str, const char *flag)
{
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string& value) {
if (value.size() == 1) {
bool IsTruthyFlagValue(const std::string &value)
{
if (value.size() == 1)
{
char v = value[0];
return isalnum(v) &&
!(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
} else if (!value.empty()) {
return isalnum(v) && !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
}
else if (!value.empty())
{
std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" ||
value_lower == "off");
} else
return !(value_lower == "false" || value_lower == "no" || value_lower == "off");
}
else
return true;
}
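A quick standalone probe of the truthiness rules above: the single characters '0', 'f', 'F', 'n', 'N' and the words "false"/"no"/"off" (any case) are falsy; everything else, including a bare empty value, is truthy. IsTruthy is a local re-statement for illustration, with the usual unsigned-char cast added for isalnum.
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <string>
bool IsTruthy(std::string value)
{
    if (value.size() == 1)
    {
        const char v = value[0];
        return isalnum(static_cast<unsigned char>(v)) &&
               !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
    }
    if (value.empty())
        return true;  // a bare "--flag" enables the flag
    std::transform(value.begin(), value.end(), value.begin(),
                   [](char c) { return static_cast<char>(::tolower(c)); });
    return !(value == "false" || value == "no" || value == "off");
}
int main()
{
    std::printf("%d %d %d %d\n", IsTruthy("1"), IsTruthy("0"), IsTruthy("OFF"), IsTruthy("enabled"));  // 1 0 0 1
}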

View File

@ -14,20 +14,13 @@
#define DECLARE_string(name) extern std::string FLAG(name)
// Macros for defining flags.
#define DEFINE_bool(name, default_val) \
bool FLAG(name) = \
benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) \
int32_t FLAG(name) = \
benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) \
double FLAG(name) = \
benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) \
std::string FLAG(name) = \
benchmark::StringFromEnv(#name, default_val)
#define DEFINE_bool(name, default_val) bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
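A hypothetical flag defined with the macros above, assuming this header is included and that FLAG(name) expands to FLAGS_##name as in the upstream header:
DEFINE_bool(my_feature, false);
// expands to:
//   bool FLAGS_my_feature = benchmark::BoolFromEnv("my_feature", false);
// BoolFromEnv consults the BENCHMARK_MY_FEATURE environment variable
// (see FlagToEnvVar above) before falling back to the default, and
// ParseBoolFlag can later overwrite FLAG(my_feature) from "--my_feature=...".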
namespace benchmark {
namespace benchmark
{
// Parses a bool from the environment variable
// corresponding to the given flag.

View File

@ -17,17 +17,20 @@
#include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include "check.h"
#include "complexity.h"
#include <algorithm>
#include <cmath>
namespace benchmark {
namespace benchmark
{
// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
BigOFunc *FittingCurve(BigO complexity)
{
static const double kLog2E = 1.44269504088896340736;
switch (complexity) {
switch (complexity)
{
case oN:
return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared:
@ -36,13 +39,10 @@ BigOFunc* FittingCurve(BigO complexity) {
return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return
[](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
return [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
};
return [](IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); };
case o1:
default:
return [](IterationCount) { return 1.0; };
@ -50,8 +50,10 @@ BigOFunc* FittingCurve(BigO complexity) {
}
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity) {
switch (complexity) {
std::string GetBigOString(BigO complexity)
{
switch (complexity)
{
case oN:
return "N";
case oNSquared:
@ -79,16 +81,16 @@ std::string GetBigOString(BigO complexity) {
// For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, BigOFunc *fitting_curve)
{
double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) {
for (size_t i = 0; i < n.size(); ++i)
{
double gn_i = fitting_curve(n[i]);
sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i;
@ -104,7 +106,8 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// Calculate RMS
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) {
for (size_t i = 0; i < n.size(); ++i)
{
double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2);
}
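For reference, the fit that the sigma_* accumulators above produce is the standard one-parameter least squares through the origin, with g the fitting curve and t_i the per-run times:
\hat{c} \;=\; \frac{\sum_i t_i \, g(n_i)}{\sum_i g(n_i)^2}, \qquad \text{minimizing} \quad \sum_i \bigl(t_i - c \, g(n_i)\bigr)^2
The RMS loop that follows then measures the residual of exactly that fit against the measured times.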
@ -123,8 +126,8 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// - complexity : If different from oAuto, the fitting curve will stick to
// this one. If it is oAuto, the best fitting curve will be
// calculated.
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time, const BigO complexity) {
LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, const BigO complexity)
{
CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2); // Do not compute a fitting curve if fewer than two
// benchmark runs are given
@ -132,7 +135,8 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
LeastSq best_fit;
if (complexity == oAuto) {
if (complexity == oAuto)
{
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve
@ -140,14 +144,18 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) {
for (const auto &fit : fit_curves)
{
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms) {
if (current_fit.rms < best_fit.rms)
{
best_fit = current_fit;
best_fit.complexity = fit;
}
}
} else {
}
else
{
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
@ -155,12 +163,13 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
return best_fit;
}
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports) {
std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
if (reports.size() < 2) return results;
if (reports.size() < 2)
return results;
// Accumulators.
std::vector<int64_t> n;
@ -168,7 +177,8 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run& run : reports) {
for (const Run &run : reports)
{
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
@ -178,10 +188,13 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
LeastSq result_cpu;
LeastSq result_real;
if (reports[0].complexity == oLambda) {
if (reports[0].complexity == oLambda)
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else {
}
else
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}

View File

@ -23,12 +23,12 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace benchmark
{
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports);
std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports);
// This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coefficient for the high-order term as
@ -39,8 +39,11 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// the same value. In case BigO::oAuto has been selected, this
// parameter will return the best fitting curve detected.
struct LeastSq {
LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
struct LeastSq
{
LeastSq() : coef(0.0), rms(0.0), complexity(oNone)
{
}
double coef;
double rms;

View File

@ -31,9 +31,11 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace benchmark
{
bool ConsoleReporter::ReportContext(const Context& context) {
bool ConsoleReporter::ReportContext(const Context &context)
{
name_field_width_ = context.name_field_width;
printed_header_ = false;
prev_counters_.clear();
@ -41,9 +43,9 @@ bool ConsoleReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
GetErrorStream()
<< "Color printing is only supported for stdout on windows."
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream())
{
GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
}
@ -52,15 +54,21 @@ bool ConsoleReporter::ReportContext(const Context& context) {
return true;
}
void ConsoleReporter::PrintHeader(const Run& run) {
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if(!run.counters.empty()) {
if(output_options_ & OO_Tabular) {
for(auto const& c : run.counters) {
void ConsoleReporter::PrintHeader(const Run &run)
{
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), "Benchmark", "Time",
"CPU", "Iterations");
if (!run.counters.empty())
{
if (output_options_ & OO_Tabular)
{
for (auto const &c : run.counters)
{
str += FormatString(" %10s", c.first.c_str());
}
} else {
}
else
{
str += " UserCounters...";
}
}
@ -68,16 +76,18 @@ void ConsoleReporter::PrintHeader(const Run& run) {
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
for (const auto& run : reports) {
void ConsoleReporter::ReportRuns(const std::vector<Run> &reports)
{
for (const auto &run : reports)
{
// print the header:
// --- if none was printed yet
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) &&
(!internal::SameNames(run.counters, prev_counters_));
if (print_header) {
print_header |= (output_options_ & OO_Tabular) && (!internal::SameNames(run.counters, prev_counters_));
if (print_header)
{
printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
@ -89,42 +99,43 @@ void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
}
}
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
...) {
static void IgnoreColorPrint(std::ostream &out, LogColor, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
}
static std::string FormatTime(double time) {
static std::string FormatTime(double time)
{
// Align decimal places...
if (time < 1.0) {
if (time < 1.0)
{
return FormatString("%10.3f", time);
}
if (time < 10.0) {
if (time < 10.0)
{
return FormatString("%10.2f", time);
}
if (time < 100.0) {
if (time < 100.0)
{
return FormatString("%10.1f", time);
}
return FormatString("%10.0f", time);
}
void ConsoleReporter::PrintRunData(const Run& result) {
void ConsoleReporter::PrintRunData(const Run &result)
{
typedef void(PrinterFn)(std::ostream &, LogColor, const char *, ...);
auto &Out = GetOutputStream();
PrinterFn* printer = (output_options_ & OO_Color) ?
(PrinterFn*)ColorPrintf : IgnoreColorPrint;
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name().c_str());
PrinterFn *printer = (output_options_ & OO_Color) ? (PrinterFn *)ColorPrintf : IgnoreColorPrint;
auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_, result.benchmark_name().c_str());
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
result.error_message.c_str());
if (result.error_occurred)
{
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
@ -134,40 +145,46 @@ void ConsoleReporter::PrintRunData(const Run& result) {
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o) {
if (result.report_big_o)
{
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
cpu_time * 100, "%");
} else {
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), cpu_time, big_o.c_str());
}
else if (result.report_rms)
{
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", cpu_time * 100, "%");
}
else
{
const char *timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
cpu_time_str.c_str(), timeLabel);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, cpu_time_str.c_str(),
timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
if (!result.report_big_o && !result.report_rms)
{
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto& c : result.counters) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
for (auto &c : result.counters)
{
const std::size_t cNameLen = std::max(std::string::size_type(10), c.first.length());
auto const &s = HumanReadableNumber(c.second.value, c.second.oneK);
const char *unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular) {
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
unit);
} else {
if (output_options_ & OO_Tabular)
{
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), unit);
}
else
{
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty()) {
if (!result.report_label.empty())
{
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}

View File

@ -14,62 +14,80 @@
#include "counter.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
double Finish(Counter const& c, IterationCount iterations, double cpu_time,
double num_threads) {
double Finish(Counter const &c, IterationCount iterations, double cpu_time, double num_threads)
{
double v = c.value;
if (c.flags & Counter::kIsRate) {
if (c.flags & Counter::kIsRate)
{
v /= cpu_time;
}
if (c.flags & Counter::kAvgThreads) {
if (c.flags & Counter::kAvgThreads)
{
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant) {
if (c.flags & Counter::kIsIterationInvariant)
{
v *= iterations;
}
if (c.flags & Counter::kAvgIterations) {
if (c.flags & Counter::kAvgIterations)
{
v /= iterations;
}
if (c.flags & Counter::kInvert) { // Invert is *always* last.
if (c.flags & Counter::kInvert)
{ // Invert is *always* last.
v = 1.0 / v;
}
return v;
}
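A standalone worked example of the flag pipeline above; the enum values are local stand-ins for the Counter::k* flags, and the ordering (rate, thread averaging, iteration scaling, inversion last) mirrors the code:
#include <cstdio>
enum Flags
{
    kIsRate = 1 << 0,
    kAvgThreads = 1 << 1,
    kIsIterationInvariant = 1 << 2,
    kAvgIterations = 1 << 3,
    kInvert = 1 << 4,
};
double FinishSketch(double v, int flags, long iterations, double cpu_time, double num_threads)
{
    if (flags & kIsRate)
        v /= cpu_time;
    if (flags & kAvgThreads)
        v /= num_threads;
    if (flags & kIsIterationInvariant)
        v *= iterations;
    if (flags & kAvgIterations)
        v /= iterations;
    if (flags & kInvert)  // Invert is *always* last.
        v = 1.0 / v;
    return v;
}
int main()
{
    // 64 bytes per iteration, 1000 iterations, 0.5s of CPU time:
    // 64 / 0.5 * 1000 = 128000 bytes/s.
    std::printf("%.0f\n", FinishSketch(64, kIsRate | kIsIterationInvariant, 1000, 0.5, 1));
}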
void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
double num_threads) {
for (auto& c : *l) {
void Finish(UserCounters *l, IterationCount iterations, double cpu_time, double num_threads)
{
for (auto &c : *l)
{
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
void Increment(UserCounters* l, UserCounters const& r) {
void Increment(UserCounters *l, UserCounters const &r)
{
// add counters present in both or just in *l
for (auto& c : *l) {
for (auto &c : *l)
{
auto it = r.find(c.first);
if (it != r.end()) {
if (it != r.end())
{
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
for (auto const& tc : r) {
for (auto const &tc : r)
{
auto it = l->find(tc.first);
if (it == l->end()) {
if (it == l->end())
{
(*l)[tc.first] = tc.second;
}
}
}
bool SameNames(UserCounters const& l, UserCounters const& r) {
if (&l == &r) return true;
if (l.size() != r.size()) {
bool SameNames(UserCounters const &l, UserCounters const &r)
{
if (&l == &r)
return true;
if (l.size() != r.size())
{
return false;
}
for (auto const& c : l) {
if (r.find(c.first) == r.end()) {
for (auto const &c : l)
{
if (r.find(c.first) == r.end())
{
return false;
}
}

View File

@ -17,12 +17,13 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace benchmark
{
// these counter-related functions are hidden to reduce API surface.
namespace internal {
void Finish(UserCounters* l, IterationCount iterations, double time,
double num_threads);
namespace internal
{
void Finish(UserCounters *l, IterationCount iterations, double time, double num_threads);
void Increment(UserCounters *l, UserCounters const &r);
bool SameNames(UserCounters const &l, UserCounters const &r);
} // end namespace internal

View File

@ -28,39 +28,52 @@
// File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark {
namespace benchmark
{
namespace {
std::vector<std::string> elements = {
"name", "iterations", "real_time", "cpu_time",
namespace
{
std::vector<std::string> elements = {"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"};
} // namespace
std::string CsvEscape(const std::string & s) {
std::string CsvEscape(const std::string &s)
{
std::string tmp;
tmp.reserve(s.size() + 2);
for (char c : s) {
switch (c) {
case '"' : tmp += "\"\""; break;
default : tmp += c; break;
for (char c : s)
{
switch (c)
{
case '"':
tmp += "\"\"";
break;
default:
tmp += c;
break;
}
}
return '"' + tmp + '"';
}
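Expected behavior of the RFC 4180-style quoting above, shown against a local copy of CsvEscape (the switch collapsed to an if) for illustration:
#include <cstdio>
#include <string>
std::string CsvEscapeSketch(const std::string &s)
{
    std::string tmp;
    tmp.reserve(s.size() + 2);
    for (char c : s)
    {
        if (c == '"')
            tmp += "\"\"";  // double any embedded quote
        else
            tmp += c;
    }
    return '"' + tmp + '"';
}
int main()
{
    std::printf("%s\n", CsvEscapeSketch("plain").c_str());       // "plain"
    std::printf("%s\n", CsvEscapeSketch("say \"hi\"").c_str());  // "say ""hi"""
}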
bool CSVReporter::ReportContext(const Context& context) {
bool CSVReporter::ReportContext(const Context &context)
{
PrintBasicContext(&GetErrorStream(), context);
return true;
}
void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
void CSVReporter::ReportRuns(const std::vector<Run> &reports)
{
std::ostream &Out = GetOutputStream();
if (!printed_header_) {
if (!printed_header_)
{
// save the names of all the user counters
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
for (const auto &run : reports)
{
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
user_counter_names_.insert(cnt.first);
@ -68,41 +81,49 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
}
// print the header
for (auto B = elements.begin(); B != elements.end();) {
for (auto B = elements.begin(); B != elements.end();)
{
Out << *B++;
if (B != elements.end()) Out << ",";
if (B != elements.end())
Out << ",";
}
for (auto B = user_counter_names_.begin();
B != user_counter_names_.end();) {
for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();)
{
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
printed_header_ = true;
} else {
}
else
{
// check that all the current counters are saved in the name set
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
for (const auto &run : reports)
{
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first
<< "\" was not in a run after being added to the header";
<< "Counter named \"" << cnt.first << "\" was not in a run after being added to the header";
}
}
}
// print results for each run
for (const auto& run : reports) {
for (const auto &run : reports)
{
PrintRunData(run);
}
}
void CSVReporter::PrintRunData(const Run& run) {
void CSVReporter::PrintRunData(const Run &run)
{
std::ostream &Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred) {
if (run.error_occurred)
{
Out << std::string(elements.size() - 3, ',');
Out << "true,";
Out << CsvEscape(run.error_message) << "\n";
@ -110,7 +131,8 @@ void CSVReporter::PrintRunData(const Run& run) {
}
// Do not print the iteration count on bigO and RMS reports
if (!run.report_big_o && !run.report_rms) {
if (!run.report_big_o && !run.report_rms)
{
Out << run.iterations;
}
Out << ",";
@ -119,32 +141,42 @@ void CSVReporter::PrintRunData(const Run& run) {
Out << run.GetAdjustedCPUTime() << ",";
// Do not print the timeLabel on bigO and RMS reports
if (run.report_big_o) {
if (run.report_big_o)
{
Out << GetBigOString(run.complexity);
} else if (!run.report_rms) {
}
else if (!run.report_rms)
{
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
if (run.counters.find("bytes_per_second") != run.counters.end()) {
if (run.counters.find("bytes_per_second") != run.counters.end())
{
Out << run.counters.at("bytes_per_second");
}
Out << ",";
if (run.counters.find("items_per_second") != run.counters.end()) {
if (run.counters.find("items_per_second") != run.counters.end())
{
Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty()) {
if (!run.report_label.empty())
{
Out << CsvEscape(run.report_label);
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
for (const auto& ucn : user_counter_names_) {
for (const auto &ucn : user_counter_names_)
{
auto it = run.counters.find(ucn);
if (it == run.counters.end()) {
if (it == run.counters.end())
{
Out << ",";
} else {
}
else
{
Out << "," << it->second;
}
}

View File

@ -50,15 +50,18 @@ extern "C" uint64_t __rdtsc();
#include <emscripten.h>
#endif
namespace benchmark {
namespace benchmark
{
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
namespace cycleclock
{
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
inline BENCHMARK_ALWAYS_INLINE int64_t Now()
{
#if defined(BENCHMARK_OS_MACOSX)
// this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that
@ -90,8 +93,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
return tb;
#else
uint32_t tbl, tbu0, tbu1;
asm volatile(
"mftbu %0\n"
asm volatile("mftbu %0\n"
"mftbl %1\n"
"mftbu %2"
: "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
@ -149,9 +151,11 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
if (pmuseren & 1)
{ // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
if (pmcntenset & 0x80000000ul)
{ // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
@ -178,8 +182,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching.
asm volatile(
"rdcycleh %0\n"
asm volatile("rdcycleh %0\n"
"rdcycle %1\n"
"rdcycleh %2\n"
"sub %0, %0, %2\n"

View File

@ -28,53 +28,80 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace benchmark
{
namespace {
namespace
{
std::string StrEscape(const std::string & s) {
std::string StrEscape(const std::string &s)
{
std::string tmp;
tmp.reserve(s.size());
for (char c : s) {
switch (c) {
case '\b': tmp += "\\b"; break;
case '\f': tmp += "\\f"; break;
case '\n': tmp += "\\n"; break;
case '\r': tmp += "\\r"; break;
case '\t': tmp += "\\t"; break;
case '\\': tmp += "\\\\"; break;
case '"' : tmp += "\\\""; break;
default : tmp += c; break;
for (char c : s)
{
switch (c)
{
case '\b':
tmp += "\\b";
break;
case '\f':
tmp += "\\f";
break;
case '\n':
tmp += "\\n";
break;
case '\r':
tmp += "\\r";
break;
case '\t':
tmp += "\\t";
break;
case '\\':
tmp += "\\\\";
break;
case '"':
tmp += "\\\"";
break;
default:
tmp += c;
break;
}
}
return tmp;
}
std::string FormatKV(std::string const& key, std::string const& value) {
std::string FormatKV(std::string const &key, std::string const &value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
std::string FormatKV(std::string const &key, const char *value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, bool value) {
std::string FormatKV(std::string const &key, bool value)
{
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
std::string FormatKV(std::string const &key, int64_t value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, IterationCount value) {
std::string FormatKV(std::string const &key, IterationCount value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, double value) {
std::string FormatKV(std::string const &key, double value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
@ -82,21 +109,24 @@ std::string FormatKV(std::string const& key, double value) {
ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity";
else {
const auto max_digits10 =
std::numeric_limits<decltype(value)>::max_digits10;
else
{
const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10)
<< value;
ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
}
return ss.str();
}
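A standalone sketch of the value-formatting rule above: scientific notation with max_digits10 - 1 fractional digits, enough for a lossless text round-trip of an IEEE-754 double (17 significant digits). Note that bare NaN/Infinity tokens are not valid JSON, so consumers must tolerate them.
#include <cmath>
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
std::string SerializeDouble(double value)
{
    std::stringstream ss;
    if (std::isnan(value))
        ss << (value < 0 ? "-" : "") << "NaN";
    else if (std::isinf(value))
        ss << (value < 0 ? "-" : "") << "Infinity";
    else
    {
        const auto max_digits10 = std::numeric_limits<double>::max_digits10;  // 17 for double
        ss << std::scientific << std::setprecision(max_digits10 - 1) << value;
    }
    return ss.str();
}
int main()
{
    std::cout << SerializeDouble(1.0 / 3.0) << "\n";  // 3.3333333333333331e-01
}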
int64_t RoundDouble(double v) { return std::lround(v); }
int64_t RoundDouble(double v)
{
return std::lround(v);
}
} // end namespace
bool JSONReporter::ReportContext(const Context& context) {
bool JSONReporter::ReportContext(const Context &context)
{
std::ostream &out = GetOutputStream();
out << "{\n";
@ -111,44 +141,40 @@ bool JSONReporter::ReportContext(const Context& context) {
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name) {
if (Context::executable_name)
{
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
CPUInfo const &info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
<< ",\n";
out << indent
<< FormatKV("mhz_per_cpu",
RoundDouble(info.cycles_per_second / 1000000.0))
<< ",\n";
out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
<< ",\n";
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus)) << ",\n";
out << indent << FormatKV("mhz_per_cpu", RoundDouble(info.cycles_per_second / 1000000.0)) << ",\n";
out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) << ",\n";
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i) {
for (size_t i = 0; i < info.caches.size(); ++i)
{
auto &CI = info.caches[i];
out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
<< ",\n";
out << cache_indent
<< FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent
<< FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
<< "\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level)) << ",\n";
out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing)) << "\n";
out << indent << "}";
if (i != info.caches.size() - 1) out << ",";
if (i != info.caches.size() - 1)
out << ",";
out << "\n";
}
indent = std::string(4, ' ');
out << indent << "],\n";
out << indent << "\"load_avg\": [";
for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
for (auto it = info.load_avg.begin(); it != info.load_avg.end();)
{
out << *it++;
if (it != info.load_avg.end()) out << ",";
if (it != info.load_avg.end())
out << ",";
}
out << "],\n";
@ -164,40 +190,48 @@ bool JSONReporter::ReportContext(const Context& context) {
return true;
}
void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
if (reports.empty()) {
void JSONReporter::ReportRuns(std::vector<Run> const &reports)
{
if (reports.empty())
{
return;
}
std::string indent(4, ' ');
std::ostream &out = GetOutputStream();
if (!first_report_) {
if (!first_report_)
{
out << ",\n";
}
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) {
for (auto it = reports.begin(); it != reports.end(); ++it)
{
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end()) {
if (++it_cp != reports.end())
{
out << ",\n";
}
}
}
void JSONReporter::Finalize() {
void JSONReporter::Finalize()
{
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
void JSONReporter::PrintRunData(Run const &run)
{
std::string indent(6, ' ');
std::ostream &out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char * {
switch (run.run_type) {
switch (run.run_type)
{
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
@ -206,45 +240,52 @@ void JSONReporter::PrintRunData(Run const& run) {
BENCHMARK_UNREACHABLE();
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("repetition_index", run.repetition_index)
<< ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("repetition_index", run.repetition_index) << ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred) {
if (run.error_occurred)
{
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
if (!run.report_big_o && !run.report_rms)
{
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
<< ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
<< ",\n";
out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_big_o)
{
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) {
}
else if (run.report_rms)
{
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto& c : run.counters) {
for (auto &c : run.counters)
{
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.has_memory_result) {
if (run.has_memory_result)
{
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
if (!run.report_label.empty()) {
if (!run.report_label.empty())
{
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';

View File

@ -6,58 +6,70 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
typedef std::basic_ostream<char> &(EndLType)(std::basic_ostream<char> &);
class LogType {
class LogType
{
friend LogType &GetNullLogInstance();
friend LogType &GetErrorLogInstance();
// FIXME: Add locking to output.
template <class Tp>
friend LogType& operator<<(LogType&, Tp const&);
template <class Tp> friend LogType &operator<<(LogType &, Tp const &);
friend LogType &operator<<(LogType &, EndLType *);
private:
LogType(std::ostream* out) : out_(out) {}
LogType(std::ostream *out) : out_(out)
{
}
std::ostream *out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
};
template <class Tp>
LogType& operator<<(LogType& log, Tp const& value) {
if (log.out_) {
template <class Tp> LogType &operator<<(LogType &log, Tp const &value)
{
if (log.out_)
{
*log.out_ << value;
}
return log;
}
inline LogType& operator<<(LogType& log, EndLType* m) {
if (log.out_) {
inline LogType &operator<<(LogType &log, EndLType *m)
{
if (log.out_)
{
*log.out_ << m;
}
return log;
}
inline int& LogLevel() {
inline int &LogLevel()
{
static int log_level = 0;
return log_level;
}
inline LogType& GetNullLogInstance() {
inline LogType &GetNullLogInstance()
{
static LogType log(nullptr);
return log;
}
inline LogType& GetErrorLogInstance() {
inline LogType &GetErrorLogInstance()
{
static LogType log(&std::clog);
return log;
}
inline LogType& GetLogInstanceForLevel(int level) {
if (level <= LogLevel()) {
inline LogType &GetLogInstanceForLevel(int level)
{
if (level <= LogLevel())
{
return GetErrorLogInstance();
}
return GetNullLogInstance();
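A minimal usage sketch of this leveled null-object logger (assumed usage, not part of the diff; the include path is a guess):
#include <ostream>
#include "log.h"  // assumed location of the header above
void LogExample()
{
  benchmark::internal::LogLevel() = 1;  // LogLevel() returns a mutable reference
  // Level 1 <= current level: forwarded to std::clog via GetErrorLogInstance().
  benchmark::internal::GetLogInstanceForLevel(1) << "warmup done" << std::endl;
  // Level 2 > current level: routed to the null instance and discarded.
  benchmark::internal::GetLogInstanceForLevel(2) << "verbose detail" << std::endl;
}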

View File

@ -22,49 +22,38 @@
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark {
namespace benchmark
{
typedef std::condition_variable Condition;
@ -72,49 +61,76 @@ typedef std::condition_variable Condition;
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
class CAPABILITY("mutex") Mutex
{
public:
Mutex() {}
Mutex()
{
}
void lock() ACQUIRE() { mut_.lock(); }
void unlock() RELEASE() { mut_.unlock(); }
std::mutex& native_handle() { return mut_; }
void lock() ACQUIRE()
{
mut_.lock();
}
void unlock() RELEASE()
{
mut_.unlock();
}
std::mutex &native_handle()
{
return mut_;
}
private:
std::mutex mut_;
};
class SCOPED_CAPABILITY MutexLock {
class SCOPED_CAPABILITY MutexLock
{
typedef std::unique_lock<std::mutex> MutexLockImp;
public:
MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
~MutexLock() RELEASE() {}
MutexLockImp& native_handle() { return ml_; }
MutexLock(Mutex &m) ACQUIRE(m) : ml_(m.native_handle())
{
}
~MutexLock() RELEASE()
{
}
MutexLockImp &native_handle()
{
return ml_;
}
private:
MutexLockImp ml_;
};
class Barrier {
class Barrier
{
public:
Barrier(int num_threads) : running_threads_(num_threads) {}
Barrier(int num_threads) : running_threads_(num_threads)
{
}
// Called by each thread
bool wait() EXCLUDES(lock_) {
bool wait() EXCLUDES(lock_)
{
bool last_thread = false;
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread) phase_condition_.notify_all();
if (last_thread)
phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_) {
void removeThread() EXCLUDES(lock_)
{
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0) phase_condition_.notify_all();
if (entered_ != 0)
phase_condition_.notify_all();
}
private:
@ -129,10 +145,12 @@ class Barrier {
// Enter the barrier and wait until all other threads have also
// entered the barrier. Returns true iff this is the last thread to
// enter the barrier.
bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
bool createBarrier(MutexLock &ml) REQUIRES(lock_)
{
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_) {
if (entered_ < running_threads_)
{
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
@ -140,7 +158,8 @@ class Barrier {
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) return false;
if (phase_number_ > phase_number_cp)
return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
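The barrier above is driven by having every worker call wait() once per phase; exactly one caller observes true. A minimal sketch under that assumption (not part of the diff; the include path is a guess):
#include <thread>
#include <vector>
#include "mutex.h"  // assumed location of the header above
void BarrierExample()
{
  constexpr int kThreads = 4;
  benchmark::Barrier barrier(kThreads);
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; ++i)
  {
    workers.emplace_back([&barrier] {
      // ... per-thread setup ...
      if (barrier.wait())
      {
        // Only the last thread to arrive runs this block.
      }
      // All threads proceed together from here.
    });
  }
  for (auto &t : workers)
    t.join();
}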

View File

@ -54,13 +54,17 @@
#include "check.h"
namespace benchmark {
namespace benchmark
{
// A wrapper around the POSIX regular expression API that provides automatic
// cleanup
class Regex {
class Regex
{
public:
Regex() : init_(false) {}
Regex() : init_(false)
{
}
~Regex();
@ -87,18 +91,22 @@ class Regex {
#if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string& spec, std::string* error) {
inline bool Regex::Init(const std::string &spec, std::string *error)
{
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning
#else
try {
try
{
#endif
re_ = std::regex(spec, std::regex_constants::extended);
init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
}
catch (const std::regex_error& e) {
if (error) {
catch (const std::regex_error &e)
{
if (error)
{
*error = e.what();
}
}
@ -106,20 +114,27 @@ catch (const std::regex_error& e) {
return init_;
}
inline Regex::~Regex() {}
inline Regex::~Regex()
{
}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
inline bool Regex::Match(const std::string &str)
{
if (!init_)
{
return false;
}
return std::regex_search(str, re_);
}
#else
inline bool Regex::Init(const std::string& spec, std::string* error) {
inline bool Regex::Init(const std::string &spec, std::string *error)
{
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
if (error) {
if (ec != 0)
{
if (error)
{
size_t needed = regerror(ec, &re_, nullptr, 0);
char *errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
@ -139,14 +154,18 @@ inline bool Regex::Init(const std::string& spec, std::string* error) {
return true;
}
inline Regex::~Regex() {
if (init_) {
inline Regex::~Regex()
{
if (init_)
{
regfree(&re_);
}
}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
inline bool Regex::Match(const std::string &str)
{
if (!init_)
{
return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
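A usage sketch for the wrapper (assumed, not part of the diff): Init() compiles the pattern and reports failure through the out-parameter, and Match() is safe to call even when initialization failed.
#include <iostream>
#include <string>
#include "re.h"  // assumed location of the header above
void RegexExample()
{
  benchmark::Regex re;
  std::string error;
  if (!re.Init("^BM_", &error))
  {
    std::cerr << "bad filter: " << error << "\n";
    return;
  }
  bool matched = re.Match("BM_StringCreation");  // true: the name starts with BM_
  (void)matched;
}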

View File

@ -24,15 +24,19 @@
#include "check.h"
#include "string_util.h"
namespace benchmark {
namespace benchmark
{
BenchmarkReporter::BenchmarkReporter()
: output_stream_(&std::cout), error_stream_(&std::cerr) {}
BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr)
{
}
BenchmarkReporter::~BenchmarkReporter() {}
BenchmarkReporter::~BenchmarkReporter()
{
}
void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Context const &context) {
void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context)
{
CHECK(out) << "cannot be null";
auto &Out = *out;
@ -42,29 +46,33 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU "
Out << "Run on (" << info.num_cpus << " X " << (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0) {
if (info.caches.size() != 0)
{
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) {
Out << " L" << CInfo.level << " " << CInfo.type << " "
<< (CInfo.size / 1024) << " KiB";
for (auto &CInfo : info.caches)
{
Out << " L" << CInfo.level << " " << CInfo.type << " " << (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n";
}
}
if (!info.load_avg.empty()) {
if (!info.load_avg.empty())
{
Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
for (auto It = info.load_avg.begin(); It != info.load_avg.end();)
{
Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end()) Out << ", ";
if (It != info.load_avg.end())
Out << ", ";
}
Out << "\n";
}
if (info.scaling_enabled) {
if (info.scaling_enabled)
{
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
@ -79,26 +87,33 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context()
: cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get())
{
}
std::string BenchmarkReporter::Run::benchmark_name() const {
std::string BenchmarkReporter::Run::benchmark_name() const
{
std::string name = run_name.str();
if (run_type == RT_Aggregate) {
if (run_type == RT_Aggregate)
{
name += "_" + aggregate_name;
}
return name;
}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double BenchmarkReporter::Run::GetAdjustedRealTime() const
{
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
double BenchmarkReporter::Run::GetAdjustedCPUTime() const
{
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
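As a concrete check of the two accessors above: a run that accumulated 2.0 s of real time over 1,000,000 iterations, reported in nanoseconds (multiplier 1e9), yields 2.0 * 1e9 / 1e6 = 2000 ns per iteration; with zero iterations the accumulated time is returned without the per-iteration division.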

View File

@ -24,15 +24,21 @@
#include <windows.h>
#endif
namespace benchmark {
namespace benchmark
{
#ifdef BENCHMARK_OS_WINDOWS
// Windows' Sleep takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
void SleepForSeconds(double seconds) {
void SleepForMilliseconds(int milliseconds)
{
Sleep(milliseconds);
}
void SleepForSeconds(double seconds)
{
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
void SleepForMicroseconds(int microseconds)
{
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
@ -40,11 +46,13 @@ void SleepForMicroseconds(int microseconds) {
; // Ignore signals and wait for the full interval to elapse.
}
void SleepForMilliseconds(int milliseconds) {
void SleepForMilliseconds(int milliseconds)
{
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}
void SleepForSeconds(double seconds) {
void SleepForSeconds(double seconds)
{
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif // BENCHMARK_OS_WINDOWS
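A trivial usage sketch (assumed, not part of the diff): callers think in seconds or milliseconds and the constants above do the conversion to the platform primitive.
#include "sleep.h"  // assumed location of the header above
void CoolDown()
{
  benchmark::SleepForMilliseconds(250);  // 250 ms on both branches
  benchmark::SleepForSeconds(0.5);       // 500000 us via nanosleep, or Sleep(500) on Windows
}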

View File

@ -1,7 +1,8 @@
#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_
namespace benchmark {
namespace benchmark
{
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;

View File

@ -15,27 +15,30 @@
#include "benchmark/benchmark.h"
#include "check.h"
#include "statistics.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>
#include "check.h"
#include "statistics.h"
namespace benchmark {
namespace benchmark
{
auto StatisticsSum = [](const std::vector<double>& v) {
return std::accumulate(v.begin(), v.end(), 0.0);
};
auto StatisticsSum = [](const std::vector<double> &v) { return std::accumulate(v.begin(), v.end(), 0.0); };
double StatisticsMean(const std::vector<double>& v) {
if (v.empty()) return 0.0;
double StatisticsMean(const std::vector<double> &v)
{
if (v.empty())
return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
}
double StatisticsMedian(const std::vector<double>& v) {
if (v.size() < 3) return StatisticsMean(v);
double StatisticsMedian(const std::vector<double> &v)
{
if (v.size() < 3)
return StatisticsMean(v);
std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2;
@ -45,45 +48,47 @@ double StatisticsMedian(const std::vector<double>& v) {
// if yes, then center is the median
// if no, then we are looking for the average between center and the value
// before
if (v.size() % 2 == 1) return *center;
if (v.size() % 2 == 1)
return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
}
// Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double>& v) {
return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
};
auto SumSquares = [](const std::vector<double> &v) { return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); };
auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) {
// Avoid NaN due to imprecision in the calculations
if (dat < 0.0) return 0.0;
if (dat < 0.0)
return 0.0;
return std::sqrt(dat);
};
double StatisticsStdDev(const std::vector<double>& v) {
double StatisticsStdDev(const std::vector<double> &v)
{
const auto mean = StatisticsMean(v);
if (v.empty()) return mean;
if (v.empty())
return mean;
// Sample standard deviation is undefined for n = 1
if (v.size() == 1) return 0.0;
if (v.size() == 1)
return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
}
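A worked example for the three helpers above (assumed usage, not part of the diff), which also checks the n/(n-1) sample rescaling in StatisticsStdDev:
#include <vector>
#include "statistics.h"  // assumed location of the header above
void StatsExample()
{
  const std::vector<double> v{1.0, 2.0, 3.0, 4.0};
  double mean = benchmark::StatisticsMean(v);     // (1+2+3+4)/4 = 2.5
  double median = benchmark::StatisticsMedian(v); // even size: (2+3)/2 = 2.5
  double stddev = benchmark::StatisticsStdDev(v); // sqrt(4/3 * (7.5 - 2.5*2.5)) ~= 1.29
  (void)mean;
  (void)median;
  (void)stddev;
}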
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports) {
std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
auto error_count =
std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.error_occurred; });
auto error_count = std::count_if(reports.begin(), reports.end(), [](Run const &run) { return run.error_occurred; });
if (reports.size() - error_count < 2) {
if (reports.size() - error_count < 2)
{
// We don't report aggregated data if there was a single run.
return results;
}
@ -99,33 +104,42 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
struct CounterStat
{
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const& r : reports) {
for (auto const& cnt : r.counters) {
for (Run const &r : reports)
{
for (auto const &cnt : r.counters)
{
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end()) {
if (it == counter_stats.end())
{
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
} else {
}
else
{
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const& run : reports) {
for (Run const &run : reports)
{
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
if (run.error_occurred)
continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const& cnt : run.counters) {
for (auto const &cnt : run.counters)
{
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
@ -134,17 +148,19 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Only add the label if it is the same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != report_label) {
for (std::size_t i = 1; i < reports.size(); i++)
{
if (reports[i].report_label != report_label)
{
report_label = "";
break;
}
}
const double iteration_rescale_factor =
double(reports.size()) / double(run_iterations);
const double iteration_rescale_factor = double(reports.size()) / double(run_iterations);
for (const auto& Stat : *reports[0].statistics) {
for (const auto &Stat : *reports[0].statistics)
{
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
@ -176,11 +192,11 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
data.time_unit = reports[0].time_unit;
// user counters
for (auto const& kv : counter_stats) {
for (auto const &kv : counter_stats)
{
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
counter_stats[kv.first].c.oneK);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}

View File

@ -20,13 +20,13 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace benchmark
{
// Return a vector containing the mean, median and standard deviation information
// (and any user-specified info) for the specified list of reports. If 'reports'
// contains fewer than two non-errored runs an empty vector is returned
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports);
double StatisticsMean(const std::vector<double> &v);
double StatisticsMedian(const std::vector<double> &v);

View File

@ -12,8 +12,10 @@
#include "arraysize.h"
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY";
@ -23,38 +25,40 @@ const char kBigIECUnits[] = "KMGTPEZY";
const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
"SI and IEC unit arrays must be the same size");
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), "SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, double thresh, int precision,
double one_k, std::string* mantissa,
int64_t* exponent) {
void ToExponentAndMantissa(double val, double thresh, int precision, double one_k, std::string *mantissa,
int64_t *exponent)
{
std::stringstream mantissa_stream;
if (val < 0) {
if (val < 0)
{
mantissa_stream << "-";
val = -val;
}
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold =
std::max(thresh, 1.0 / std::pow(10.0, precision));
const double adjusted_threshold = std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in ]simple_threshold,small_threshold[ will be printed as-is
const double simple_threshold = 0.01;
if (val > big_threshold) {
if (val > big_threshold)
{
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i)
{
scaled /= one_k;
if (scaled <= big_threshold) {
if (scaled <= big_threshold)
{
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
@ -63,13 +67,18 @@ void ToExponentAndMantissa(double val, double thresh, int precision,
}
mantissa_stream << val;
*exponent = 0;
} else if (val < small_threshold) {
}
else if (val < small_threshold)
{
// Negative powers
if (val < simple_threshold) {
if (val < simple_threshold)
{
double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i)
{
scaled *= one_k;
if (scaled >= small_threshold) {
if (scaled >= small_threshold)
{
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
@ -79,53 +88,59 @@ void ToExponentAndMantissa(double val, double thresh, int precision,
}
mantissa_stream << val;
*exponent = 0;
} else {
}
else
{
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
}
std::string ExponentToPrefix(int64_t exponent, bool iec) {
if (exponent == 0) return "";
std::string ExponentToPrefix(int64_t exponent, bool iec)
{
if (exponent == 0)
return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return "";
if (index >= kUnitsSize)
return "";
const char* array =
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
const char *array = (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec)
return array[index] + std::string("i");
else
return std::string(1, array[index]);
}
std::string ToBinaryStringFullySpecified(double value, double threshold,
int precision, double one_k = 1024.0) {
std::string ToBinaryStringFullySpecified(double value, double threshold, int precision, double one_k = 1024.0)
{
std::string mantissa;
int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
&exponent);
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, &exponent);
return mantissa + ExponentToPrefix(exponent, false);
}
} // end namespace
void AppendHumanReadable(int n, std::string* str) {
void AppendHumanReadable(int n, std::string *str)
{
std::stringstream ss;
// Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
}
std::string HumanReadableNumber(double n, double one_k) {
std::string HumanReadableNumber(double n, double one_k)
{
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}
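Two worked calls (checked against the thresholds above; assumed usage, not part of the diff): with the 1.1 edge-softening threshold, 1500 is rescaled once before a unit letter is picked, while values in (0.01, 1.1) are printed as-is.
std::string a = benchmark::HumanReadableNumber(1500.0, 1000.0);  // "1.5k"
std::string b = benchmark::HumanReadableNumber(0.005, 1000.0);   // "5m" (0.5 would print as "0.5")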
std::string StrFormatImp(const char* msg, va_list args) {
std::string StrFormatImp(const char *msg, va_list args)
{
// we might need a second shot at this, so pre-emptively make a copy
va_list args_cp;
va_copy(args_cp, args);
@ -141,7 +156,8 @@ std::string StrFormatImp(const char* msg, va_list args) {
va_end(args_cp);
// handle empty expansion
if (ret == 0) return std::string{};
if (ret == 0)
return std::string{};
if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
@ -155,7 +171,8 @@ std::string StrFormatImp(const char* msg, va_list args) {
return std::string(buff_ptr.get());
}
std::string StrFormat(const char* format, ...) {
std::string StrFormat(const char *format, ...)
{
va_list args;
va_start(args, format);
std::string tmp = StrFormatImp(format, args);
@ -170,7 +187,8 @@ std::string StrFormat(const char* format, ...) {
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos, int base) {
unsigned long stoul(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
@ -184,20 +202,23 @@ unsigned long stoul(const std::string& str, size_t* pos, int base) {
errno = oldErrno;
/* Check for errors and return */
if (strtoulErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of unsigned long");
} else if (strEnd == strStart || strtoulErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
if (strtoulErrno == ERANGE)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of unsigned long");
}
if (pos != nullptr) {
else if (strEnd == strStart || strtoulErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
int stoi(const std::string& str, size_t* pos, int base) {
int stoi(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
@ -211,20 +232,23 @@ int stoi(const std::string& str, size_t* pos, int base) {
errno = oldErrno;
/* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtolErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
if (strtolErrno == ERANGE || long(int(result)) != result)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
}
if (pos != nullptr) {
else if (strEnd == strStart || strtolErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
}
double stod(const std::string& str, size_t* pos) {
double stod(const std::string &str, size_t *pos)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
@ -238,14 +262,16 @@ double stod(const std::string& str, size_t* pos) {
errno = oldErrno;
/* Check for errors and return */
if (strtodErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtodErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
if (strtodErrno == ERANGE)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
}
if (pos != nullptr) {
else if (strEnd == strStart || strtodErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
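An assumed usage sketch (not part of the diff); the reimplementations mirror the std:: signatures, including the optional end-position out-parameter:
size_t pos = 0;
unsigned long mask = benchmark::stoul("ff,3", &pos, 16);  // mask == 255, pos == 2 (stops at ',')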

View File

@ -1,12 +1,13 @@
#ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_
#include "internal_macros.h"
#include <sstream>
#include <string>
#include <utility>
#include "internal_macros.h"
namespace benchmark {
namespace benchmark
{
void AppendHumanReadable(int n, std::string *str);
@ -20,18 +21,19 @@ __attribute__((format(printf, 1, 2)))
std::string
StrFormat(const char *format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
inline std::ostream &StrCatImp(std::ostream &out) BENCHMARK_NOEXCEPT
{
return out;
}
template <class First, class... Rest>
inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
template <class First, class... Rest> inline std::ostream &StrCatImp(std::ostream &out, First &&f, Rest &&...rest)
{
out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
inline std::string StrCat(Args&&... args) {
template <class... Args> inline std::string StrCat(Args &&...args)
{
std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...);
return ss.str();
@ -44,14 +46,13 @@ inline std::string StrCat(Args&&... args) {
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos = nullptr,
int base = 10);
unsigned long stoul(const std::string &str, size_t *pos = nullptr, int base = 10);
int stoi(const std::string &str, size_t *pos = nullptr, int base = 10);
double stod(const std::string &str, size_t *pos = nullptr);
#else
using std::stoul;
using std::stoi;
using std::stod;
using std::stoi;
using std::stoul;
#endif
} // end namespace benchmark
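An assumed usage sketch (not part of the diff): StrCat folds every argument through a single ostringstream, so streamable types can be mixed freely.
std::string governor_file =
    benchmark::StrCat("/sys/devices/system/cpu/cpu", 3, "/cpufreq/scaling_governor");
// "/sys/devices/system/cpu/cpu3/cpufreq/scaling_governor"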

View File

@ -17,9 +17,9 @@
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <codecvt>
#include <versionhelpers.h>
#include <windows.h>
#include <codecvt>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
@ -28,8 +28,8 @@
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_OPENBSD
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
@ -54,9 +54,9 @@
#include <iostream>
#include <iterator>
#include <limits>
#include <locale>
#include <memory>
#include <sstream>
#include <locale>
#include "check.h"
#include "cycleclock.h"
@ -65,19 +65,24 @@
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
void PrintImp(std::ostream& out) { out << std::endl; }
void PrintImp(std::ostream &out)
{
out << std::endl;
}
template <class First, class... Rest>
void PrintImp(std::ostream& out, First&& f, Rest&&... rest) {
template <class First, class... Rest> void PrintImp(std::ostream &out, First &&f, Rest &&...rest)
{
out << std::forward<First>(f);
PrintImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
template <class... Args> BENCHMARK_NORETURN void PrintErrorAndDie(Args &&...args)
{
PrintImp(std::cerr, std::forward<Args>(args)...);
std::exit(EXIT_FAILURE);
}
@ -86,7 +91,8 @@ BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) {
/// ValueUnion - A type used to correctly alias the byte-for-byte output of
/// `sysctl` with the result type it's to be interpreted as.
struct ValueUnion {
struct ValueUnion
{
union DataT {
uint32_t uint32_value;
uint64_t uint64_value;
@ -100,21 +106,34 @@ struct ValueUnion {
DataPtr Buff;
public:
ValueUnion() : Size(0), Buff(nullptr, &std::free) {}
ValueUnion() : Size(0), Buff(nullptr, &std::free)
{
}
explicit ValueUnion(size_t BuffSize)
: Size(sizeof(DataT) + BuffSize),
Buff(::new (std::malloc(Size)) DataT(), &std::free) {}
: Size(sizeof(DataT) + BuffSize), Buff(::new (std::malloc(Size)) DataT(), &std::free)
{
}
ValueUnion(ValueUnion &&other) = default;
explicit operator bool() const { return bool(Buff); }
explicit operator bool() const
{
return bool(Buff);
}
char* data() const { return Buff->bytes; }
char *data() const
{
return Buff->bytes;
}
std::string GetAsString() const { return std::string(data()); }
std::string GetAsString() const
{
return std::string(data());
}
int64_t GetAsInteger() const {
int64_t GetAsInteger() const
{
if (Size == sizeof(Buff->uint32_value))
return static_cast<int32_t>(Buff->uint32_value);
else if (Size == sizeof(Buff->uint64_value))
@ -122,7 +141,8 @@ struct ValueUnion {
BENCHMARK_UNREACHABLE();
}
uint64_t GetAsUnsigned() const {
uint64_t GetAsUnsigned() const
{
if (Size == sizeof(Buff->uint32_value))
return Buff->uint32_value;
else if (Size == sizeof(Buff->uint64_value))
@ -130,8 +150,8 @@ struct ValueUnion {
BENCHMARK_UNREACHABLE();
}
template <class T, int N>
std::array<T, N> GetAsArray() {
template <class T, int N> std::array<T, N> GetAsArray()
{
const int ArrSize = sizeof(T) * N;
CHECK_LE(ArrSize, Size);
std::array<T, N> Arr;
@ -140,21 +160,27 @@ struct ValueUnion {
}
};
ValueUnion GetSysctlImp(std::string const& Name) {
ValueUnion GetSysctlImp(std::string const &Name)
{
#if defined BENCHMARK_OS_OPENBSD
int mib[2];
mib[0] = CTL_HW;
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed"))
{
ValueUnion buff(sizeof(int));
if (Name == "hw.ncpu") {
if (Name == "hw.ncpu")
{
mib[1] = HW_NCPU;
} else {
}
else
{
mib[1] = HW_CPUSPEED;
}
if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1)
{
return ValueUnion();
}
return buff;
@ -173,45 +199,52 @@ ValueUnion GetSysctlImp(std::string const& Name) {
}
BENCHMARK_MAYBE_UNUSED
bool GetSysctl(std::string const& Name, std::string* Out) {
bool GetSysctl(std::string const &Name, std::string *Out)
{
Out->clear();
auto Buff = GetSysctlImp(Name);
if (!Buff) return false;
if (!Buff)
return false;
Out->assign(Buff.data());
return true;
}
template <class Tp,
class = typename std::enable_if<std::is_integral<Tp>::value>::type>
bool GetSysctl(std::string const& Name, Tp* Out) {
template <class Tp, class = typename std::enable_if<std::is_integral<Tp>::value>::type>
bool GetSysctl(std::string const &Name, Tp *Out)
{
*Out = 0;
auto Buff = GetSysctlImp(Name);
if (!Buff) return false;
if (!Buff)
return false;
*Out = static_cast<Tp>(Buff.GetAsUnsigned());
return true;
}
template <class Tp, size_t N>
bool GetSysctl(std::string const& Name, std::array<Tp, N>* Out) {
template <class Tp, size_t N> bool GetSysctl(std::string const &Name, std::array<Tp, N> *Out)
{
auto Buff = GetSysctlImp(Name);
if (!Buff) return false;
if (!Buff)
return false;
*Out = Buff.GetAsArray<Tp, N>();
return true;
}
#endif
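An illustration of the overload set above (internal to this translation unit and only compiled under BENCHMARK_HAS_SYSCTL; the key names here are examples, not part of the diff):
int ncpu = 0;
GetSysctl("hw.ncpu", &ncpu);  // integral overload, via ValueUnion::GetAsUnsigned()
std::string model;
GetSysctl("machdep.cpu.brand_string", &model);  // string overload
std::array<uint64_t, 4> cache_counts{};
GetSysctl("hw.cacheconfig", &cache_counts);  // fixed-size array overload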
template <class ArgT>
bool ReadFromFile(std::string const& fname, ArgT* arg) {
template <class ArgT> bool ReadFromFile(std::string const &fname, ArgT *arg)
{
*arg = ArgT();
std::ifstream f(fname.c_str());
if (!f.is_open()) return false;
if (!f.is_open())
return false;
f >> *arg;
return f.good();
}
bool CpuScalingEnabled(int num_cpus) {
bool CpuScalingEnabled(int num_cpus)
{
// We don't have a valid CPU count, so don't even bother.
if (num_cpus <= 0) return false;
if (num_cpus <= 0)
return false;
#ifdef BENCHMARK_OS_QNX
return false;
#endif
@ -220,16 +253,18 @@ bool CpuScalingEnabled(int num_cpus) {
// local file system. If reading the exported files fails, then we may not be
// running on Linux, so we silently ignore all the read errors.
std::string res;
for (int cpu = 0; cpu < num_cpus; ++cpu) {
std::string governor_file =
StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
if (ReadFromFile(governor_file, &res) && res != "performance") return true;
for (int cpu = 0; cpu < num_cpus; ++cpu)
{
std::string governor_file = StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
if (ReadFromFile(governor_file, &res) && res != "performance")
return true;
}
#endif
return false;
}
int CountSetBitsInCPUMap(std::string Val) {
int CountSetBitsInCPUMap(std::string Val)
{
auto CountBits = [](std::string Part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
Part = "0x" + Part;
@ -238,35 +273,40 @@ int CountSetBitsInCPUMap(std::string Val) {
};
size_t Pos;
int total = 0;
while ((Pos = Val.find(',')) != std::string::npos) {
while ((Pos = Val.find(',')) != std::string::npos)
{
total += CountBits(Val.substr(0, Pos));
Val = Val.substr(Pos + 1);
}
if (!Val.empty()) {
if (!Val.empty())
{
total += CountBits(Val);
}
return total;
}
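A worked example for the parser above (input invented, not part of the diff): shared_cpu_map-style values are comma-separated hexadecimal words.
int bits = CountSetBitsInCPUMap("ff,00000003");  // popcount(0xff) + popcount(0x3) == 10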
BENCHMARK_MAYBE_UNUSED
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS()
{
std::vector<CPUInfo::CacheInfo> res;
std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
int Idx = 0;
while (true) {
while (true)
{
CPUInfo::CacheInfo info;
std::string FPath = StrCat(dir, "index", Idx++, "/");
std::ifstream f(StrCat(FPath, "size").c_str());
if (!f.is_open()) break;
if (!f.is_open())
break;
std::string suffix;
f >> info.size;
if (f.fail())
PrintErrorAndDie("Failed while reading file '", FPath, "size'");
if (f.good()) {
if (f.good())
{
f >> suffix;
if (f.bad())
PrintErrorAndDie(
"Invalid cache size format: failed to read size suffix");
PrintErrorAndDie("Invalid cache size format: failed to read size suffix");
else if (f && suffix != "K")
PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
else if (suffix == "K")
@ -287,12 +327,14 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS() {
}
#ifdef BENCHMARK_OS_MACOSX
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX()
{
std::vector<CPUInfo::CacheInfo> res;
std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
GetSysctl("hw.cacheconfig", &CacheCounts);
struct {
struct
{
std::string name;
std::string type;
int level;
@ -301,9 +343,11 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
{"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
for (auto& C : Cases) {
for (auto &C : Cases)
{
int val;
if (!GetSysctl(C.name, &val)) continue;
if (!GetSysctl(C.name, &val))
continue;
CPUInfo::CacheInfo info;
info.type = C.type;
info.level = C.level;
@ -314,7 +358,8 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
return res;
}
#elif defined(BENCHMARK_OS_WINDOWS)
std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows()
{
std::vector<CPUInfo::CacheInfo> res;
DWORD buffer_size = 0;
using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
@ -324,24 +369,27 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
GetLogicalProcessorInformation(nullptr, &buffer_size);
UPtr buff((PInfo *)malloc(buffer_size), &std::free);
if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ",
GetLastError());
PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", GetLastError());
PInfo *it = buff.get();
PInfo *end = buff.get() + (buffer_size / sizeof(PInfo));
for (; it != end; ++it) {
if (it->Relationship != RelationCache) continue;
for (; it != end; ++it)
{
if (it->Relationship != RelationCache)
continue;
using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
BitSet B(it->ProcessorMask);
// To prevent duplicates, only consider caches where CPU 0 is specified
if (!B.test(0)) continue;
if (!B.test(0))
continue;
CInfo *Cache = &it->Cache;
CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(B.count());
C.level = Cache->Level;
C.size = Cache->Size;
switch (Cache->Type) {
switch (Cache->Type)
{
case CacheUnified:
C.type = "Unified";
break;
@ -363,14 +411,17 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
return res;
}
#elif BENCHMARK_OS_QNX
std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX()
{
std::vector<CPUInfo::CacheInfo> res;
struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize;
for(int i = 0; i < num; ++i ) {
for (int i = 0; i < num; ++i)
{
CPUInfo::CacheInfo info;
switch (cache->flags){
switch (cache->flags)
{
case CACHE_FLAG_INSTR:
info.type = "Instruction";
info.level = 1;
@ -398,7 +449,8 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
}
#endif
std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
std::vector<CPUInfo::CacheInfo> GetCacheSizes()
{
#ifdef BENCHMARK_OS_MACOSX
return GetCacheSizesMacOSX();
#elif defined(BENCHMARK_OS_WINDOWS)
@ -410,7 +462,8 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
#endif
}
std::string GetSystemName() {
std::string GetSystemName()
{
#if defined(BENCHMARK_OS_WINDOWS)
std::string str;
const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
@ -445,15 +498,18 @@ std::string GetSystemName() {
#endif // def HOST_NAME_MAX
char hostname[HOST_NAME_MAX];
int retVal = gethostname(hostname, HOST_NAME_MAX);
if (retVal != 0) return std::string("");
if (retVal != 0)
return std::string("");
return std::string(hostname);
#endif // Catch-all POSIX block.
}
int GetNumCPUs() {
int GetNumCPUs()
{
#ifdef BENCHMARK_HAS_SYSCTL
int NumCPU = -1;
if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU;
if (GetSysctl("hw.ncpu", &NumCPU))
return NumCPU;
fprintf(stderr, "Err: %s\n", strerror(errno));
std::exit(EXIT_FAILURE);
#elif defined(BENCHMARK_OS_WINDOWS)
@ -468,10 +524,9 @@ int GetNumCPUs() {
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
if (NumCPU < 0) {
fprintf(stderr,
"sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
strerror(errno));
if (NumCPU < 0)
{
fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", strerror(errno));
}
return NumCPU;
#elif defined(BENCHMARK_OS_QNX)
@ -480,44 +535,53 @@ int GetNumCPUs() {
int NumCPUs = 0;
int MaxID = -1;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) {
if (!f.is_open())
{
std::cerr << "failed to open /proc/cpuinfo\n";
return -1;
}
const std::string Key = "processor";
std::string ln;
while (std::getline(f, ln)) {
if (ln.empty()) continue;
while (std::getline(f, ln))
{
if (ln.empty())
continue;
size_t SplitIdx = ln.find(':');
std::string value;
#if defined(__s390__)
// s390 has another format in /proc/cpuinfo
// it needs to be parsed differently
if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
if (SplitIdx != std::string::npos)
value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1);
#else
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
if (SplitIdx != std::string::npos)
value = ln.substr(SplitIdx + 1);
#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0)
{
NumCPUs++;
if (!value.empty()) {
if (!value.empty())
{
int CurID = benchmark::stoi(value);
MaxID = std::max(CurID, MaxID);
}
}
}
if (f.bad()) {
if (f.bad())
{
std::cerr << "Failure reading /proc/cpuinfo\n";
return -1;
}
if (!f.eof()) {
if (!f.eof())
{
std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return -1;
}
f.close();
if ((MaxID + 1) != NumCPUs) {
fprintf(stderr,
"CPU ID assignments in /proc/cpuinfo seem messed up."
if ((MaxID + 1) != NumCPUs)
{
fprintf(stderr, "CPU ID assignments in /proc/cpuinfo seem messed up."
" This is usually caused by a bad BIOS.\n");
}
return NumCPUs;
@ -525,7 +589,8 @@ int GetNumCPUs() {
BENCHMARK_UNREACHABLE();
}
double GetCPUCyclesPerSecond() {
double GetCPUCyclesPerSecond()
{
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
long freq;
@ -538,8 +603,8 @@ double GetCPUCyclesPerSecond() {
if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
// If CPU scaling is in effect, we want to use the *maximum* frequency,
// not whatever CPU speed some random processor happens to be using now.
|| ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
|| ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &freq))
{
// The value is in kHz (as the file name suggests). For example, on a
// 2GHz warpstation, the file contains the value "2000000".
return freq * 1000.0;
@ -549,45 +614,57 @@ double GetCPUCyclesPerSecond() {
double bogo_clock = error_value;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open()) {
if (!f.is_open())
{
std::cerr << "failed to open /proc/cpuinfo\n";
return error_value;
}
auto startsWithKey = [](std::string const &Value, std::string const &Key) {
if (Key.size() > Value.size()) return false;
auto Cmp = [&](char X, char Y) {
return std::tolower(X) == std::tolower(Y);
};
if (Key.size() > Value.size())
return false;
auto Cmp = [&](char X, char Y) { return std::tolower(X) == std::tolower(Y); };
return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
};
std::string ln;
while (std::getline(f, ln)) {
if (ln.empty()) continue;
while (std::getline(f, ln))
{
if (ln.empty())
continue;
size_t SplitIdx = ln.find(':');
std::string value;
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
if (SplitIdx != std::string::npos)
value = ln.substr(SplitIdx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz")) {
if (!value.empty()) {
if (startsWithKey(ln, "cpu MHz"))
{
if (!value.empty())
{
double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0) return cycles_per_second;
if (cycles_per_second > 0)
return cycles_per_second;
}
} else if (startsWithKey(ln, "bogomips")) {
if (!value.empty()) {
}
else if (startsWithKey(ln, "bogomips"))
{
if (!value.empty())
{
bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0) bogo_clock = error_value;
if (bogo_clock < 0.0)
bogo_clock = error_value;
}
}
}
if (f.bad()) {
if (f.bad())
{
std::cerr << "Failure reading /proc/cpuinfo\n";
return error_value;
}
if (!f.eof()) {
if (!f.eof())
{
std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return error_value;
}
@ -595,7 +672,8 @@ double GetCPUCyclesPerSecond() {
// If we found the bogomips clock, but nothing better, we'll use it (but
// we're not happy about it); otherwise, fallback to the rough estimation
// below.
if (bogo_clock >= 0.0) return bogo_clock;
if (bogo_clock >= 0.0)
return bogo_clock;
#elif defined BENCHMARK_HAS_SYSCTL
constexpr auto *FreqStr =
@ -608,56 +686,56 @@ double GetCPUCyclesPerSecond() {
#endif
unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD
if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
if (GetSysctl(FreqStr, &hz))
return hz * 1000000;
#else
if (GetSysctl(FreqStr, &hz)) return hz;
if (GetSysctl(FreqStr, &hz))
return hz;
#endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
FreqStr, strerror(errno));
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", FreqStr, strerror(errno));
#elif defined BENCHMARK_OS_WINDOWS
// In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate.
DWORD data, data_size = sizeof(data);
if (IsWindowsXPOrGreater() &&
SUCCEEDED(
SHGetValueA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
"~MHz", nullptr, &data, &data_size)))
return static_cast<double>((int64_t)data *
(int64_t)(1000 * 1000)); // was mhz
SUCCEEDED(SHGetValueA(HKEY_LOCAL_MACHINE, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", "~MHz", nullptr,
&data, &data_size)))
return static_cast<double>((int64_t)data * (int64_t)(1000 * 1000)); // was mhz
#elif defined(BENCHMARK_OS_SOLARIS)
kstat_ctl_t *kc = kstat_open();
if (!kc) {
if (!kc)
{
std::cerr << "failed to open /dev/kstat\n";
return -1;
}
kstat_t *ksp = kstat_lookup(kc, (char *)"cpu_info", -1, (char *)"cpu_info0");
if (!ksp) {
if (!ksp)
{
std::cerr << "failed to lookup in /dev/kstat\n";
return -1;
}
if (kstat_read(kc, ksp, NULL) < 0) {
if (kstat_read(kc, ksp, NULL) < 0)
{
std::cerr << "failed to read from /dev/kstat\n";
return -1;
}
kstat_named_t *knp =
(kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
if (!knp) {
kstat_named_t *knp = (kstat_named_t *)kstat_data_lookup(ksp, (char *)"current_clock_Hz");
if (!knp)
{
std::cerr << "failed to lookup data in /dev/kstat\n";
return -1;
}
if (knp->data_type != KSTAT_DATA_UINT64) {
std::cerr << "current_clock_Hz is of unexpected data type: "
<< knp->data_type << "\n";
if (knp->data_type != KSTAT_DATA_UINT64)
{
std::cerr << "current_clock_Hz is of unexpected data type: " << knp->data_type << "\n";
return -1;
}
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
#elif defined(BENCHMARK_OS_QNX)
return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
(int64_t)(1000 * 1000));
return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * (int64_t)(1000 * 1000));
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;
@ -666,16 +744,20 @@ double GetCPUCyclesPerSecond() {
return static_cast<double>(cycleclock::Now() - start_ticks);
}
std::vector<double> GetLoadAvg() {
#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__)
std::vector<double> GetLoadAvg()
{
#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD) && \
!defined(__ANDROID__)
constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
if (nelem < 1) {
if (nelem < 1)
{
res.clear();
} else {
}
else
{
res.resize(nelem);
}
return res;
@ -686,23 +768,25 @@ std::vector<double> GetLoadAvg() {
} // end namespace
const CPUInfo& CPUInfo::Get() {
const CPUInfo &CPUInfo::Get()
{
static const CPUInfo *info = new CPUInfo();
return *info;
}
CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
cycles_per_second(GetCPUCyclesPerSecond()),
caches(GetCacheSizes()),
scaling_enabled(CpuScalingEnabled(num_cpus)),
load_avg(GetLoadAvg()) {}
: num_cpus(GetNumCPUs()), cycles_per_second(GetCPUCyclesPerSecond()), caches(GetCacheSizes()),
scaling_enabled(CpuScalingEnabled(num_cpus)), load_avg(GetLoadAvg())
{
}
const SystemInfo& SystemInfo::Get() {
const SystemInfo &SystemInfo::Get()
{
static const SystemInfo *info = new SystemInfo();
return *info;
}
SystemInfo::SystemInfo() : name(GetSystemName()) {}
SystemInfo::SystemInfo() : name(GetSystemName())
{
}
} // end namespace benchmark

View File

@ -6,38 +6,47 @@
#include "benchmark/benchmark.h"
#include "mutex.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
class ThreadManager {
class ThreadManager
{
public:
explicit ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
explicit ThreadManager(int num_threads) : alive_threads_(num_threads), start_stop_barrier_(num_threads)
{
}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
Mutex &GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_)
{
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
bool StartStopBarrier() EXCLUDES(end_cond_mutex_)
{
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_)
{
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
if (--alive_threads_ == 0)
{
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
void WaitForAllThreads() EXCLUDES(end_cond_mutex_)
{
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
end_condition_.wait(lock.native_handle(), [this]() { return alive_threads_ == 0; });
}
public:
struct Result {
struct Result
{
IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;

View File

@ -4,65 +4,83 @@
#include "check.h"
#include "timers.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
class ThreadTimer {
explicit ThreadTimer(bool measure_process_cpu_time_)
: measure_process_cpu_time(measure_process_cpu_time_) {}
class ThreadTimer
{
explicit ThreadTimer(bool measure_process_cpu_time_) : measure_process_cpu_time(measure_process_cpu_time_)
{
}
public:
static ThreadTimer Create() {
static ThreadTimer Create()
{
return ThreadTimer(/*measure_process_cpu_time_=*/false);
}
static ThreadTimer CreateProcessCpuTime() {
static ThreadTimer CreateProcessCpuTime()
{
return ThreadTimer(/*measure_process_cpu_time_=*/true);
}
// Called by each thread
void StartTimer() {
void StartTimer()
{
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread
void StopTimer() {
void StopTimer()
{
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ +=
std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
cpu_time_used_ += std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
}
// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
void SetIterationTime(double seconds)
{
manual_time_used_ += seconds;
}
bool running() const { return running_; }
bool running() const
{
return running_;
}
// REQUIRES: timer is not running
double real_time_used() const {
double real_time_used() const
{
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() const {
double cpu_time_used() const
{
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() const {
double manual_time_used() const
{
CHECK(!running_);
return manual_time_used_;
}
private:
double ReadCpuTimerOfChoice() const {
if (measure_process_cpu_time) return ProcessCPUUsage();
double ReadCpuTimerOfChoice() const
{
if (measure_process_cpu_time)
return ProcessCPUUsage();
return ThreadCPUUsage();
}
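An assumed usage sketch (not part of the diff): one timer per worker thread, started and stopped around the measured region only.
auto timer = benchmark::internal::ThreadTimer::Create();
timer.StartTimer();
// ... measured region ...
timer.StopTimer();
double elapsed = timer.real_time_used();  // accessors CHECK that the timer is stopped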

View File

@ -57,64 +57,65 @@
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
namespace benchmark
{
// Suppress unused warnings on helper functions.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
namespace {
namespace
{
#if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
double MakeTime(FILETIME const &kernel_time, FILETIME const &user_time)
{
ULARGE_INTEGER kernel;
ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) +
static_cast<double>(user.QuadPart)) *
1e-7;
return (static_cast<double>(kernel.QuadPart) + static_cast<double>(user.QuadPart)) * 1e-7;
}
#elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) +
static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
double MakeTime(struct rusage const &ru)
{
return (static_cast<double>(ru.ru_utime.tv_sec) + static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}
#endif
#if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const& info) {
return (static_cast<double>(info.user_time.seconds) +
static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) +
static_cast<double>(info.system_time.microseconds) * 1e-6);
double MakeTime(thread_basic_info_data_t const &info)
{
return (static_cast<double>(info.user_time.seconds) + static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.microseconds) * 1e-6);
}
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) {
double MakeTime(struct timespec const &ts)
{
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif
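As a unit check on the MakeTime overloads above: FILETIME counts 100 ns ticks, so a combined kernel+user total of 15,000,000 ticks becomes 15000000 * 1e-7 = 1.5 s; a rusage of 1 s + 250,000 us becomes 1.25 s; and a timespec of {1, 250000000} likewise becomes 1 + 250000000 * 1e-9 = 1.25 s.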
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
BENCHMARK_NORETURN static void DiagnoseAndExit(const char *msg)
{
std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
}
} // end namespace
double ProcessCPUUsage() {
double ProcessCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
&user_time))
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, &user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
@ -132,20 +133,21 @@ double ProcessCPUUsage() {
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else
struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
if (getrusage(RUSAGE_SELF, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif
}
double ThreadCPUUsage() {
double ThreadCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
&user_time);
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, &user_time);
return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
@ -153,8 +155,8 @@ double ThreadCPUUsage() {
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
KERN_SUCCESS) {
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == KERN_SUCCESS)
{
return MakeTime(info);
}
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
@ -167,36 +169,42 @@ double ThreadCPUUsage() {
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
if (getrusage(RUSAGE_LWP, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else
#error Per-thread timing is not available on your system.
#endif
}
namespace {
namespace
{
std::string DateTimeString(bool local) {
std::string DateTimeString(bool local)
{
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kStorageSize = 128;
char storage[kStorageSize];
std::size_t written;
if (local) {
if (local)
{
#if defined(BENCHMARK_OS_WINDOWS)
written =
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
written = std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
std::tm timeinfo;
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
} else {
}
else
{
#if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
@ -212,6 +220,9 @@ std::string DateTimeString(bool local) {
} // end namespace
std::string LocalDateTimeString() { return DateTimeString(true); }
std::string LocalDateTimeString()
{
return DateTimeString(true);
}
} // end namespace benchmark


@ -4,7 +4,8 @@
#include <chrono>
#include <string>
namespace benchmark {
namespace benchmark
{
// Return the CPU usage of the current process
double ProcessCPUUsage();
@ -16,18 +17,19 @@ double ChildrenCPUUsage();
double ThreadCPUUsage();
#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
struct ChooseSteadyClock {
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady> struct ChooseSteadyClock
{
typedef std::chrono::high_resolution_clock type;
};
template <>
struct ChooseSteadyClock<false> {
template <> struct ChooseSteadyClock<false>
{
typedef std::chrono::steady_clock type;
};
#endif
struct ChooseClockType {
struct ChooseClockType
{
#if defined(HAVE_STEADY_CLOCK)
typedef ChooseSteadyClock<>::type type;
#else
@ -35,7 +37,8 @@ struct ChooseClockType {
#endif
};
inline double ChronoClockNow() {
inline double ChronoClockNow()
{
typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count();
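// Editor's note: ChooseClockType picks std::chrono::high_resolution_clock only
// when it is steady and falls back to steady_clock otherwise, so differences
// between ChronoClockNow() readings are monotonic. Hypothetical usage:
//
//   double start = benchmark::ChronoClockNow();
//   // ... timed region ...
//   double elapsed_sec = benchmark::ChronoClockNow() - start;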


@ -3,17 +3,22 @@
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
for (auto _ : state) {
for (int x = 0; x < state.range(0); ++x) {
void BM_spin_empty(benchmark::State &state)
{
for (auto _ : state)
{
for (int x = 0; x < state.range(0); ++x)
{
benchmark::DoNotOptimize(x);
}
}
@ -21,12 +26,16 @@ void BM_spin_empty(benchmark::State& state) {
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
void BM_spin_pause_before(benchmark::State &state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
@ -34,14 +43,18 @@ void BM_spin_pause_before(benchmark::State& state) {
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
for (auto _ : state) {
void BM_spin_pause_during(benchmark::State &state)
{
for (auto _ : state)
{
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
@ -49,8 +62,10 @@ void BM_spin_pause_during(benchmark::State& state) {
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
for (auto _ : state) {
void BM_pause_during(benchmark::State &state)
{
for (auto _ : state)
{
state.PauseTiming();
state.ResumeTiming();
}
@ -60,67 +75,83 @@ BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
void BM_spin_pause_after(benchmark::State &state)
{
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
void BM_spin_pause_before_and_after(benchmark::State &state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
for (auto _ : state) {
void BM_empty_stop_start(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
void BM_KeepRunning(benchmark::State& state) {
void BM_KeepRunning(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning()) {
while (state.KeepRunning())
{
++iter_count;
}
assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);
void BM_KeepRunningBatch(benchmark::State& state) {
void BM_KeepRunningBatch(benchmark::State &state)
{
// Choose a prime batch size to avoid evenly dividing max_iterations.
const benchmark::IterationCount batch_size = 101;
benchmark::IterationCount iter_count = 0;
while (state.KeepRunningBatch(batch_size)) {
while (state.KeepRunningBatch(batch_size))
{
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
}
BENCHMARK(BM_KeepRunningBatch);
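// Editor's note (sketch): KeepRunningBatch(n) advances the iteration count n
// at a time, and the prime batch size above deliberately fails to divide
// max_iterations so the final, partially-needed batch path is exercised. A
// minimal hypothetical batched benchmark:
//
//   static void BM_Batched(benchmark::State &state)
//   {
//     while (state.KeepRunningBatch(64))
//     {
//       for (int i = 0; i < 64; ++i)
//         benchmark::DoNotOptimize(i);  // amortizes per-iteration bookkeeping
//     }
//   }
//   BENCHMARK(BM_Batched);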
void BM_RangedFor(benchmark::State& state) {
void BM_RangedFor(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0;
for (auto _ : state) {
for (auto _ : state)
{
++iter_count;
}
assert(iter_count == state.max_iterations);
@ -129,8 +160,8 @@ BENCHMARK(BM_RangedFor);
// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<
typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value, "");
static_assert(std::is_same<typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value,
"");
BENCHMARK_MAIN();


@ -4,99 +4,115 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace benchmark {
namespace internal {
namespace {
namespace benchmark
{
namespace internal
{
namespace
{
TEST(AddRangeTest, Simple) {
TEST(AddRangeTest, Simple)
{
std::vector<int> dst;
AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Simple64) {
TEST(AddRangeTest, Simple64)
{
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Advanced) {
TEST(AddRangeTest, Advanced)
{
std::vector<int> dst;
AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, Advanced64) {
TEST(AddRangeTest, Advanced64)
{
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, FullRange8) {
TEST(AddRangeTest, FullRange8)
{
std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
}
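// Editor's note: AddRange(dst, lo, hi, mult) appends lo, lo*mult, lo*mult^2,
// ... and then hi itself, which is why FullRange8 above ends at 127 (the
// int8_t maximum) instead of overflowing toward 512.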
TEST(AddRangeTest, FullRange64) {
TEST(AddRangeTest, FullRange64)
{
std::vector<int64_t> dst;
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
EXPECT_THAT(
dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
1099511627776LL, 1125899906842624LL,
EXPECT_THAT(dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1152921504606846976LL, 9223372036854775807LL));
}
TEST(AddRangeTest, NegativeRanges) {
TEST(AddRangeTest, NegativeRanges)
{
std::vector<int> dst;
AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
}
TEST(AddRangeTest, StrictlyNegative) {
TEST(AddRangeTest, StrictlyNegative)
{
std::vector<int> dst;
AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
}
TEST(AddRangeTest, SymmetricNegativeRanges) {
TEST(AddRangeTest, SymmetricNegativeRanges)
{
std::vector<int> dst;
AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
}
TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
TEST(AddRangeTest, SymmetricNegativeRangesOddMult)
{
std::vector<int> dst;
AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
}
TEST(AddRangeTest, NegativeRangesAsymmetric) {
TEST(AddRangeTest, NegativeRangesAsymmetric)
{
std::vector<int> dst;
AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
}
TEST(AddRangeTest, NegativeRangesLargeStep) {
TEST(AddRangeTest, NegativeRangesLargeStep)
{
// Always include -1, 0, 1 when crossing zero.
std::vector<int> dst;
AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
}
TEST(AddRangeTest, ZeroOnlyRange) {
TEST(AddRangeTest, ZeroOnlyRange)
{
std::vector<int> dst;
AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0));
}
TEST(AddRangeTest, NegativeRange64) {
TEST(AddRangeTest, NegativeRange64)
{
std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
}
TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
TEST(AddRangeTest, NegativeRangePreservesExistingOrder)
{
// If elements already exist in the range, ensure we don't change
// their ordering by adding negative values.
std::vector<int64_t> dst = {1, 2, 3};
@ -104,20 +120,20 @@ TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
}
TEST(AddRangeTest, FullNegativeRange64) {
TEST(AddRangeTest, FullNegativeRange64)
{
std::vector<int64_t> dst;
const auto min = std::numeric_limits<int64_t>::min();
const auto max = std::numeric_limits<int64_t>::max();
AddRange(&dst, min, max, 1024);
EXPECT_THAT(
dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL,
-1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL,
1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL,
1125899906842624LL, 1152921504606846976LL, max}));
EXPECT_THAT(dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL, -1099511627776LL, -1073741824LL, -1048576LL,
-1024LL, -1LL, 0LL, 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1152921504606846976LL, max}));
}
TEST(AddRangeTest, Simple8) {
TEST(AddRangeTest, Simple8)
{
std::vector<int8_t> dst;
AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));


@ -1,30 +1,35 @@
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
namespace {
namespace
{
using namespace benchmark;
using namespace benchmark::internal;
TEST(BenchmarkNameTest, Empty) {
TEST(BenchmarkNameTest, Empty)
{
const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string());
}
TEST(BenchmarkNameTest, FunctionName) {
TEST(BenchmarkNameTest, FunctionName)
{
auto name = BenchmarkName();
name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name");
}
TEST(BenchmarkNameTest, FunctionNameAndArgs) {
TEST(BenchmarkNameTest, FunctionNameAndArgs)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
}
TEST(BenchmarkNameTest, MinTime) {
TEST(BenchmarkNameTest, MinTime)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4";
@ -32,7 +37,8 @@ TEST(BenchmarkNameTest, MinTime) {
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
}
TEST(BenchmarkNameTest, Iterations) {
TEST(BenchmarkNameTest, Iterations)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
@ -40,7 +46,8 @@ TEST(BenchmarkNameTest, Iterations) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
}
TEST(BenchmarkNameTest, Repetitions) {
TEST(BenchmarkNameTest, Repetitions)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
@ -48,7 +55,8 @@ TEST(BenchmarkNameTest, Repetitions) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
}
TEST(BenchmarkNameTest, TimeType) {
TEST(BenchmarkNameTest, TimeType)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
@ -56,7 +64,8 @@ TEST(BenchmarkNameTest, TimeType) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
}
TEST(BenchmarkNameTest, Threads) {
TEST(BenchmarkNameTest, Threads)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
@ -64,7 +73,8 @@ TEST(BenchmarkNameTest, Threads) {
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
}
TEST(BenchmarkNameTest, TestEmptyFunctionName) {
TEST(BenchmarkNameTest, TestEmptyFunctionName)
{
auto name = BenchmarkName();
name.args = "first:3/second:4";
name.threads = "threads:22";


@ -24,15 +24,19 @@
#define BENCHMARK_NOINLINE
#endif
namespace {
namespace
{
int BENCHMARK_NOINLINE Factorial(uint32_t n) {
int BENCHMARK_NOINLINE Factorial(uint32_t n)
{
return (n == 1) ? 1 : n * Factorial(n - 1);
}
double CalculatePi(int depth) {
double CalculatePi(int depth)
{
double pi = 0.0;
for (int i = 0; i < depth; ++i) {
for (int i = 0; i < depth; ++i)
{
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
@ -40,9 +44,11 @@ double CalculatePi(int depth) {
return (pi - 1.0) * 4;
}
std::set<int64_t> ConstructRandomSet(int64_t size) {
std::set<int64_t> ConstructRandomSet(int64_t size)
{
std::set<int64_t> s;
for (int i = 0; i < size; ++i) s.insert(s.end(), i);
for (int i = 0; i < size; ++i)
s.insert(s.end(), i);
return s;
}
@ -51,9 +57,11 @@ std::vector<int>* test_vector = nullptr;
} // end namespace
static void BM_Factorial(benchmark::State& state) {
static void BM_Factorial(benchmark::State &state)
{
int fac_42 = 0;
for (auto _ : state) fac_42 = Factorial(8);
for (auto _ : state)
fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
@ -62,18 +70,22 @@ static void BM_Factorial(benchmark::State& state) {
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
static void BM_CalculatePiRange(benchmark::State &state)
{
double pi = 0.0;
for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
for (auto _ : state)
pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static void BM_CalculatePi(benchmark::State &state)
{
static const int depth = 1024;
for (auto _ : state) {
for (auto _ : state)
{
benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
}
}
@ -81,13 +93,16 @@ BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
static void BM_SetInsert(benchmark::State &state)
{
std::set<int64_t> data;
for (auto _ : state) {
for (auto _ : state)
{
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand());
for (int j = 0; j < state.range(1); ++j)
data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
@ -97,41 +112,47 @@ static void BM_SetInsert(benchmark::State& state) {
// non-timed part of each iteration will make the benchmark take forever.
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
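// Editor's note: Ranges() takes one {lo, hi} pair per argument and runs the
// cross product of the values generated for each pair, so the line above
// sweeps set sizes 1<<10..8<<10 against insert counts 128..512. Two fixed
// argument tuples could be requested explicitly instead (hypothetical):
//
//   BENCHMARK(BM_SetInsert)->Args({1 << 10, 128})->Args({8 << 10, 512});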
template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
template <typename Container, typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State &state)
{
ValueType v = 42;
for (auto _ : state) {
for (auto _ : state)
{
Container c;
for (int64_t i = state.range(0); --i;) c.push_back(v);
for (int64_t i = state.range(0); --i;)
c.push_back(v);
}
const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State& state) {
static void BM_StringCompare(benchmark::State &state)
{
size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-');
std::string s2(len, '-');
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
for (auto _ : state)
benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
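// Editor's note: Range(lo, hi) generates arguments multiplicatively with a
// default multiplier of 8 (1, 8, 64, ..., plus hi itself), not every value in
// between; RangeMultiplier() adjusts the step (hypothetical):
//
//   BENCHMARK(BM_StringCompare)->RangeMultiplier(4)->Range(1, 1 << 20);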
static void BM_SetupTeardown(benchmark::State& state) {
if (state.thread_index == 0) {
static void BM_SetupTeardown(benchmark::State &state)
{
if (state.thread_index == 0)
{
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
for (auto _ : state) {
for (auto _ : state)
{
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
@ -139,60 +160,67 @@ static void BM_SetupTeardown(benchmark::State& state) {
test_vector->pop_back();
++i;
}
if (state.thread_index == 0) {
if (state.thread_index == 0)
{
delete test_vector;
}
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
static void BM_LongTest(benchmark::State &state)
{
double tracker = 0.0;
for (auto _ : state) {
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) {
static void BM_ParallelMemset(benchmark::State &state)
{
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0) {
if (state.thread_index == 0)
{
test_vector = new std::vector<int>(static_cast<size_t>(size));
}
for (auto _ : state) {
for (int i = from; i < to; i++) {
for (auto _ : state)
{
for (int i = from; i < to; i++)
{
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
}
}
if (state.thread_index == 0) {
if (state.thread_index == 0)
{
delete test_vector;
}
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) {
static void BM_ManualTiming(benchmark::State &state)
{
int64_t slept_for = 0;
int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
std::chrono::duration<double, std::micro> sleep_duration{static_cast<double>(microseconds)};
for (auto _ : state) {
for (auto _ : state)
{
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
auto elapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
@ -204,24 +232,29 @@ BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#ifdef BENCHMARK_HAS_CXX11
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
for (auto _ : state) {
template <class... Args> void BM_with_args(benchmark::State &state, Args &&...)
{
for (auto _ : state)
{
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"),
std::pair<int, double>(42, 3.8));
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair<int, double>(42, 3.8));
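// Editor's note (sketch): BENCHMARK_CAPTURE(fn, label, args...) registers fn
// with the trailing arguments forwarded after the State parameter and reports
// the run as "fn/label". A hypothetical minimal capture:
//
//   void BM_fill(benchmark::State &state, char c);
//   BENCHMARK_CAPTURE(BM_fill, fill_x, 'x');  // reported as "BM_fill/fill_x"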
void BM_non_template_args(benchmark::State& state, int, double) {
while(state.KeepRunning()) {}
void BM_non_template_args(benchmark::State &state, int, double)
{
while (state.KeepRunning())
{
}
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
#endif // BENCHMARK_HAS_CXX11
static void BM_DenseThreadRanges(benchmark::State& st) {
switch (st.range(0)) {
static void BM_DenseThreadRanges(benchmark::State &st)
{
switch (st.range(0))
{
case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
@ -229,13 +262,13 @@ static void BM_DenseThreadRanges(benchmark::State& st) {
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
st.threads == 14);
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || st.threads == 14);
break;
default:
assert(false && "Invalid test case number");
}
while (st.KeepRunning()) {
while (st.KeepRunning())
{
}
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
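// Editor's note: ThreadRange(lo, hi) steps thread counts multiplicatively,
// while DenseThreadRange(lo, hi, stride) walks them additively; the line above
// covers 1, 2 and 3 threads, matching the case-1 assertions. Case 3 would be
// satisfied by a registration such as (hypothetical, elided by this hunk):
//
//   BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);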


@ -4,16 +4,17 @@
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
extern "C" {
extern "C"
{
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
}
// CHECK-LABEL: test_basic:
extern "C" void test_basic() {
extern "C" void test_basic()
{
int x;
benchmark::DoNotOptimize(&x);
x = 101;
@ -24,7 +25,8 @@ extern "C" void test_basic() {
}
// CHECK-LABEL: test_redundant_store:
extern "C" void test_redundant_store() {
extern "C" void test_redundant_store()
{
ExternInt = 3;
benchmark::ClobberMemory();
ExternInt = 51;
@ -34,7 +36,8 @@ extern "C" void test_redundant_store() {
}
// CHECK-LABEL: test_redundant_read:
extern "C" void test_redundant_read() {
extern "C" void test_redundant_read()
{
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
@ -48,7 +51,8 @@ extern "C" void test_redundant_read() {
}
// CHECK-LABEL: test_redundant_read2:
extern "C" void test_redundant_read2() {
extern "C" void test_redundant_read2()
{
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;


@ -4,33 +4,41 @@
#include "../src/internal_macros.h"
#include "gtest/gtest.h"
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
#if defined(BENCHMARK_OS_WINDOWS)
int setenv(const char* name, const char* value, int overwrite) {
if (!overwrite) {
int setenv(const char *name, const char *value, int overwrite)
{
if (!overwrite)
{
// NOTE: getenv_s is far superior but not available under mingw.
char *env_value = getenv(name);
if (env_value == nullptr) {
if (env_value == nullptr)
{
return -1;
}
}
return _putenv_s(name, value);
}
int unsetenv(const char* name) {
int unsetenv(const char *name)
{
return _putenv_s(name, "");
}
#endif // BENCHMARK_OS_WINDOWS
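// Editor's note: on Windows, _putenv_s with an empty value removes the
// variable, so the shim above gives these tests a POSIX-style unsetenv.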
TEST(BoolFromEnv, Default) {
TEST(BoolFromEnv, Default)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
}
TEST(BoolFromEnv, False) {
TEST(BoolFromEnv, False)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
@ -88,7 +96,8 @@ TEST(BoolFromEnv, False) {
unsetenv("BENCHMARK_IN_ENV");
}
TEST(BoolFromEnv, True) {
TEST(BoolFromEnv, True)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
@ -152,46 +161,54 @@ TEST(BoolFromEnv, True) {
#endif
}
TEST(Int32FromEnv, NotInEnv) {
TEST(Int32FromEnv, NotInEnv)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
}
TEST(Int32FromEnv, InvalidInteger) {
TEST(Int32FromEnv, InvalidInteger)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(Int32FromEnv, ValidInteger) {
TEST(Int32FromEnv, ValidInteger)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(DoubleFromEnv, NotInEnv) {
TEST(DoubleFromEnv, NotInEnv)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
}
TEST(DoubleFromEnv, InvalidReal) {
TEST(DoubleFromEnv, InvalidReal)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(DoubleFromEnv, ValidReal) {
TEST(DoubleFromEnv, ValidReal)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
unsetenv("BENCHMARK_IN_ENV");
}
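// Editor's note: the *FromEnv helpers uppercase the flag name and prepend
// "BENCHMARK_", which is why each test sets BENCHMARK_IN_ENV while querying
// the lowercase flag "in_env".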
TEST(StringFromEnv, Default) {
TEST(StringFromEnv, Default)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
}
TEST(StringFromEnv, Valid) {
TEST(StringFromEnv, Valid)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
unsetenv("BENCHMARK_IN_ENV");


@ -1,28 +1,26 @@
#undef NDEBUG
#include "benchmark/benchmark.h"
#include "output_test.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace {
namespace
{
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
#define ADD_COMPLEXITY_CASES(...) int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int AddComplexityTest(std::string test_name, std::string big_o_test_name,
std::string rms_test_name, std::string big_o) {
int AddComplexityTest(std::string test_name, std::string big_o_test_name, std::string rms_test_name, std::string big_o)
{
SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}});
AddCases(
TC_ConsoleOut,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
AddCases(TC_ConsoleOut, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
@ -56,9 +54,12 @@ int AddComplexityTest(std::string test_name, std::string big_o_test_name,
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
for (int i = 0; i < 1024; ++i) {
void BM_Complexity_O1(benchmark::State &state)
{
for (auto _ : state)
{
for (int i = 0; i < 1024; ++i)
{
benchmark::DoNotOptimize(&i);
}
}
@ -66,9 +67,7 @@ void BM_Complexity_O1(benchmark::State& state) {
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
->Range(1, 1 << 18)
->Complexity([](benchmark::IterationCount) { return 1.0; });
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](benchmark::IterationCount) { return 1.0; });
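// Editor's note: Complexity() accepts three forms, an enum such as
// benchmark::o1 or benchmark::oN, no argument (the best-fitting curve is
// chosen automatically), or a user lambda supplying f(N); the scaling
// coefficient is fitted by least squares against the values reported through
// SetComplexityN().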
const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
@ -81,53 +80,46 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
enum_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
auto_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
lambda_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
std::vector<int> ConstructRandomVector(int64_t size) {
std::vector<int> ConstructRandomVector(int64_t size)
{
std::vector<int> v;
v.reserve(static_cast<int>(size));
for (int i = 0; i < size; ++i) {
for (int i = 0; i < size; ++i)
{
v.push_back(static_cast<int>(std::rand() % size));
}
return v;
}
void BM_Complexity_O_N(benchmark::State& state) {
void BM_Complexity_O_N(benchmark::State &state)
{
auto v = ConstructRandomVector(state.range(0));
// Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
for (auto _ : state)
{
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) -> double {
return static_cast<double>(n);
});
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
->Complexity([](benchmark::IterationCount n) -> double { return static_cast<double>(n); });
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();
const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
@ -136,39 +128,31 @@ const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n);
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
lambda_big_o_n);
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
static void BM_Complexity_O_N_log_N(benchmark::State &state)
{
auto v = ConstructRandomVector(state.range(0));
for (auto _ : state) {
for (auto _ : state)
{
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
}
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
});
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
->Complexity([](benchmark::IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); });
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
@ -177,37 +161,36 @@ const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //
void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
for (auto _ : state) {
void BM_ComplexityCaptureArgs(benchmark::State &state, int n)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(n);
}
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
->Complexity(benchmark::oN)
->Ranges({{1, 2}, {3, 4}});
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)->Complexity(benchmark::oN)->Ranges({{1, 2}, {3, 4}});
const std::string complexity_capture_name =
"BM_ComplexityCaptureArgs/capture_test";
const std::string complexity_capture_name = "BM_ComplexityCaptureArgs/capture_test";
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
complexity_capture_name + "_RMS", "N");
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", complexity_capture_name + "_RMS", "N");
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}


@ -12,8 +12,10 @@
#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
#endif
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
void BM_empty(benchmark::State &state)
{
while (state.KeepRunning())
{
volatile benchmark::IterationCount x = state.iterations();
((void)x);
}
@ -22,39 +24,43 @@ BENCHMARK(BM_empty);
// The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State& state) {
assert((state.range(0) == 1 && state.range(1) == 2) ||
(state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning()) {
void BM_old_arg_range_interface(benchmark::State &state)
{
assert((state.range(0) == 1 && state.range(1) == 2) || (state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning())
{
}
}
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
template <class T, class U>
void BM_template2(benchmark::State& state) {
template <class T, class U> void BM_template2(benchmark::State &state)
{
BM_empty(state);
}
BENCHMARK_TEMPLATE2(BM_template2, int, long);
template <class T>
void BM_template1(benchmark::State& state) {
template <class T> void BM_template1(benchmark::State &state)
{
BM_empty(state);
}
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);
template <class T>
struct BM_Fixture : public ::benchmark::Fixture {
template <class T> struct BM_Fixture : public ::benchmark::Fixture
{
};
BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State &state)
{
BM_empty(state);
}
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State &state)
{
BM_empty(state);
}
void BM_counters(benchmark::State& state) {
void BM_counters(benchmark::State &state)
{
BM_empty(state);
state.counters["Foo"] = 2;
}


@ -17,7 +17,8 @@
#define TEST_HAS_NO_EXCEPTIONS
#endif
void TestHandler() {
void TestHandler()
{
#ifndef TEST_HAS_NO_EXCEPTIONS
throw std::logic_error("");
#else
@ -25,55 +26,70 @@ void TestHandler() {
#endif
}
void try_invalid_pause_resume(benchmark::State& state) {
void try_invalid_pause_resume(benchmark::State &state)
{
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
try {
try
{
state.PauseTiming();
std::abort();
} catch (std::logic_error const&) {
}
try {
catch (std::logic_error const &)
{
}
try
{
state.ResumeTiming();
std::abort();
} catch (std::logic_error const&) {
}
catch (std::logic_error const &)
{
}
#else
(void)state; // avoid unused warning
#endif
}
void BM_diagnostic_test(benchmark::State& state) {
void BM_diagnostic_test(benchmark::State &state)
{
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
if (called_once == false)
try_invalid_pause_resume(state);
for (auto _ : state) {
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
if (called_once == false)
try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test);
void BM_diagnostic_test_keep_running(benchmark::State& state) {
void BM_diagnostic_test_keep_running(benchmark::State &state)
{
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
if (called_once == false)
try_invalid_pause_resume(state);
while(state.KeepRunning()) {
while (state.KeepRunning())
{
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
if (called_once == false)
try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test_keep_running);
int main(int argc, char* argv[]) {
int main(int argc, char *argv[])
{
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();


@ -10,22 +10,24 @@
// reporter in the presence of DisplayAggregatesOnly().
// We do not care about console output; the normal tests check that already.
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
void BM_SummaryRepeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
int main(int argc, char* argv[]) {
int main(int argc, char *argv[])
{
const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
{
std::cout << "Precondition mismatch. Expected to only find 6 "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3\", "


@ -4,36 +4,45 @@
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
extern "C" {
extern "C"
{
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
inline int Add42(int x) { return x + 42; }
inline int Add42(int x)
{
return x + 42;
}
struct NotTriviallyCopyable {
struct NotTriviallyCopyable
{
NotTriviallyCopyable();
explicit NotTriviallyCopyable(int x) : value(x) {}
explicit NotTriviallyCopyable(int x) : value(x)
{
}
NotTriviallyCopyable(NotTriviallyCopyable const &);
int value;
};
struct Large {
struct Large
{
int value;
int data[2];
};
}
// CHECK-LABEL: test_with_rvalue:
extern "C" void test_with_rvalue() {
extern "C" void test_with_rvalue()
{
benchmark::DoNotOptimize(Add42(0));
// CHECK: movl $42, %eax
// CHECK: ret
}
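// Editor's note: the CHECK lines throughout this file are LLVM FileCheck
// patterns matched against the generated assembly; here they assert that
// Add42(0) folds to the constant 42 even though DoNotOptimize forces the
// result to be materialized in a register.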
// CHECK-LABEL: test_with_large_rvalue:
extern "C" void test_with_large_rvalue() {
extern "C" void test_with_large_rvalue()
{
benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
@ -43,14 +52,16 @@ extern "C" void test_with_large_rvalue() {
}
// CHECK-LABEL: test_with_non_trivial_rvalue:
extern "C" void test_with_non_trivial_rvalue() {
extern "C" void test_with_non_trivial_rvalue()
{
benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
// CHECK: mov{{l|q}} ExternInt(%rip)
// CHECK: ret
}
// CHECK-LABEL: test_with_lvalue:
extern "C" void test_with_lvalue() {
extern "C" void test_with_lvalue()
{
int x = 101;
benchmark::DoNotOptimize(x);
// CHECK-GNU: movl $101, %eax
@ -59,7 +70,8 @@ extern "C" void test_with_lvalue() {
}
// CHECK-LABEL: test_with_large_lvalue:
extern "C" void test_with_large_lvalue() {
extern "C" void test_with_large_lvalue()
{
Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
@ -70,7 +82,8 @@ extern "C" void test_with_large_lvalue() {
}
// CHECK-LABEL: test_with_non_trivial_lvalue:
extern "C" void test_with_non_trivial_lvalue() {
extern "C" void test_with_non_trivial_lvalue()
{
NotTriviallyCopyable NTC(ExternInt);
benchmark::DoNotOptimize(NTC);
// CHECK: ExternInt(%rip)
@ -79,7 +92,8 @@ extern "C" void test_with_non_trivial_lvalue() {
}
// CHECK-LABEL: test_with_const_lvalue:
extern "C" void test_with_const_lvalue() {
extern "C" void test_with_const_lvalue()
{
const int x = 123;
benchmark::DoNotOptimize(x);
// CHECK: movl $123, %eax
@ -87,7 +101,8 @@ extern "C" void test_with_const_lvalue() {
}
// CHECK-LABEL: test_with_large_const_lvalue:
extern "C" void test_with_large_const_lvalue() {
extern "C" void test_with_large_const_lvalue()
{
const Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
@ -98,7 +113,8 @@ extern "C" void test_with_large_const_lvalue() {
}
// CHECK-LABEL: test_with_non_trivial_const_lvalue:
extern "C" void test_with_non_trivial_const_lvalue() {
extern "C" void test_with_non_trivial_const_lvalue()
{
const NotTriviallyCopyable Obj(ExternInt);
benchmark::DoNotOptimize(Obj);
// CHECK: mov{{q|l}} ExternInt(%rip)
@ -106,7 +122,8 @@ extern "C" void test_with_non_trivial_const_lvalue() {
}
// CHECK-LABEL: test_div_by_two:
extern "C" int test_div_by_two(int input) {
extern "C" int test_div_by_two(int input)
{
int divisor = 2;
benchmark::DoNotOptimize(divisor);
return input / divisor;
@ -116,7 +133,8 @@ extern "C" int test_div_by_two(int input) {
}
// CHECK-LABEL: test_inc_integer:
extern "C" int test_inc_integer() {
extern "C" int test_inc_integer()
{
int x = 0;
for (int i = 0; i < 5; ++i)
benchmark::DoNotOptimize(++x);
@ -131,7 +149,8 @@ extern "C" int test_inc_integer() {
}
// CHECK-LABEL: test_pointer_rvalue
extern "C" void test_pointer_rvalue() {
extern "C" void test_pointer_rvalue()
{
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
@ -141,7 +160,8 @@ extern "C" void test_pointer_rvalue() {
}
// CHECK-LABEL: test_pointer_const_lvalue:
extern "C" void test_pointer_const_lvalue() {
extern "C" void test_pointer_const_lvalue()
{
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
@ -152,7 +172,8 @@ extern "C" void test_pointer_const_lvalue() {
}
// CHECK-LABEL: test_pointer_lvalue:
extern "C" void test_pointer_lvalue() {
extern "C" void test_pointer_lvalue()
{
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])


@ -2,30 +2,40 @@
#include <cstdint>
namespace {
namespace
{
#if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif
std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
std::uint64_t double_up(const std::uint64_t x)
{
return x * 2;
}
} // namespace
// Using DoNotOptimize on types like BitRef seems to cause a lot of problems
// with the inline assembly on both GCC and Clang.
struct BitRef {
struct BitRef
{
int index;
unsigned char &byte;
public:
static BitRef Make() {
static BitRef Make()
{
static unsigned char arr[2] = {};
BitRef b(1, arr[0]);
return b;
}
private:
BitRef(int i, unsigned char& b) : index(i), byte(b) {}
BitRef(int i, unsigned char &b) : index(i), byte(b)
{
}
};
int main(int, char*[]) {
int main(int, char *[])
{
// This test verifies compilation of DoNotOptimize() for some types.
char buffer8[8] = "";


@ -10,24 +10,35 @@
#include <sstream>
#include <string>
namespace {
namespace
{
class TestReporter : public benchmark::ConsoleReporter {
class TestReporter : public benchmark::ConsoleReporter
{
public:
virtual bool ReportContext(const Context& context) {
virtual bool ReportContext(const Context &context)
{
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
virtual void ReportRuns(const std::vector<Run> &report)
{
++count_;
ConsoleReporter::ReportRuns(report);
};
TestReporter() : count_(0) {}
TestReporter() : count_(0)
{
}
virtual ~TestReporter() {}
virtual ~TestReporter()
{
}
size_t GetCount() const { return count_; }
size_t GetCount() const
{
return count_;
}
private:
mutable size_t count_;
@ -35,67 +46,77 @@ class TestReporter : public benchmark::ConsoleReporter {
} // end namespace
static void NoPrefix(benchmark::State& state) {
for (auto _ : state) {
static void NoPrefix(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
for (auto _ : state) {
static void BM_Foo(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
for (auto _ : state) {
static void BM_Bar(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
for (auto _ : state) {
static void BM_FooBar(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
for (auto _ : state) {
static void BM_FooBa(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_FooBa);
int main(int argc, char **argv) {
int main(int argc, char **argv)
{
bool list_only = false;
for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
std::string::npos;
list_only |= std::string(argv[i]).find("--benchmark_list_tests") != std::string::npos;
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&test_reporter);
const size_t returned_count = benchmark::RunSpecifiedBenchmarks(&test_reporter);
if (argc == 2) {
if (argc == 2)
{
// Make sure we ran all of the tests
std::stringstream ss(argv[1]);
size_t expected_return;
ss >> expected_return;
if (returned_count != expected_return) {
if (returned_count != expected_return)
{
std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = "
<< returned_count << std::endl;
<< " tests to match the filter but returned_count = " << returned_count << std::endl;
return -1;
}
const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports) {
if (reports_count != expected_reports)
{
std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count
<< std::endl;
<< " tests to be run but reported_count = " << reports_count << std::endl;
return -1;
}
}


@ -4,40 +4,53 @@
#include <cassert>
#include <memory>
class MyFixture : public ::benchmark::Fixture {
class MyFixture : public ::benchmark::Fixture
{
public:
void SetUp(const ::benchmark::State& state) {
if (state.thread_index == 0) {
void SetUp(const ::benchmark::State &state)
{
if (state.thread_index == 0)
{
assert(data.get() == nullptr);
data.reset(new int(42));
}
}
void TearDown(const ::benchmark::State& state) {
if (state.thread_index == 0) {
void TearDown(const ::benchmark::State &state)
{
if (state.thread_index == 0)
{
assert(data.get() != nullptr);
data.reset();
}
}
~MyFixture() { assert(data == nullptr); }
~MyFixture()
{
assert(data == nullptr);
}
std::unique_ptr<int> data;
};
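// Editor's note: a Fixture's SetUp/TearDown run in every thread of every
// benchmark attached to it, so the thread_index == 0 guards ensure the shared
// data is allocated and released exactly once per run. BENCHMARK_F defines
// and registers in one step, while BENCHMARK_DEFINE_F (used for Bar below)
// needs a matching BENCHMARK_REGISTER_F(MyFixture, Bar), presumably in the
// part of the file this hunk truncates.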
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st)
{
assert(data.get() != nullptr);
assert(*data == 42);
for (auto _ : st) {
for (auto _ : st)
{
}
}
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State &st)
{
if (st.thread_index == 0)
{
assert(data.get() != nullptr);
assert(*data == 42);
}
for (auto _ : st) {
for (auto _ : st)
{
assert(data.get() != nullptr);
assert(*data == 42);
}


@ -1,27 +1,26 @@
#undef NDEBUG
#include <chrono>
#include <thread>
#include "../src/timers.h"
#include "benchmark/benchmark.h"
#include "output_test.h"
#include <chrono>
#include <thread>
static const std::chrono::duration<double, std::milli> time_frame(50);
static const double time_frame_in_sec(
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(
time_frame)
.count());
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(time_frame).count());
void MyBusySpinwait() {
void MyBusySpinwait()
{
const auto start = benchmark::ChronoClockNow();
while (true) {
while (true)
{
const auto now = benchmark::ChronoClockNow();
const auto elapsed = now - start;
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >=
time_frame)
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >= time_frame)
return;
}
}
@ -33,152 +32,92 @@ void MyBusySpinwait() {
// ========================================================================= //
// BM_MainThread
void BM_MainThread(benchmark::State& state) {
for (auto _ : state) {
void BM_MainThread(benchmark::State &state)
{
for (auto _ : state)
{
MyBusySpinwait();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
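// Editor's note: under UseManualTime() the reported time is whatever the
// benchmark feeds to state.SetIterationTime(), which is how every variant in
// this file pins its per-iteration time to time_frame_in_sec; the kIsRate
// counter "invtime" is then reported as 1 divided by that time.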
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
// ========================================================================= //
// BM_WorkerThread
void BM_WorkerThread(benchmark::State& state) {
for (auto _ : state) {
void BM_WorkerThread(benchmark::State &state)
{
for (auto _ : state)
{
std::thread Worker(&MyBusySpinwait);
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
// ========================================================================= //
// BM_MainThreadAndWorkerThread
void BM_MainThreadAndWorkerThread(benchmark::State& state) {
for (auto _ : state) {
void BM_MainThreadAndWorkerThread(benchmark::State &state)
{
for (auto _ : state)
{
std::thread Worker(&MyBusySpinwait);
MyBusySpinwait();
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
// ========================================================================= //
// ---------------------------- TEST CASES END ----------------------------- //
// ========================================================================= //
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -1,7 +1,9 @@
#include "benchmark/benchmark.h"
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}

View File

@ -3,11 +3,14 @@
#include <cstdlib>
#include <map>
namespace
{
std::map<int, int> ConstructRandomMap(int size)
{
std::map<int, int> m;
for (int i = 0; i < size; ++i)
{
m.insert(std::make_pair(std::rand() % size, std::rand() % size));
}
return m;
@ -16,14 +19,17 @@ std::map<int, int> ConstructRandomMap(int size) {
} // namespace
// Basic version.
static void BM_MapLookup(benchmark::State &state)
{
const int size = static_cast<int>(state.range(0));
std::map<int, int> m;
for (auto _ : state)
{
state.PauseTiming();
m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i)
{
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
@ -32,21 +38,29 @@ static void BM_MapLookup(benchmark::State& state) {
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
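Note: Range(1 << 3, 1 << 12) expands multiplicatively, by a default multiplier of 8, so BM_MapLookup is measured at sizes 8, 64, 512, and 4096 rather than at every value in between. A denser alternative registration would plausibly be:
// Sample at every power of two from 8 to 4096 instead of powers of eight.
BENCHMARK(BM_MapLookup)->RangeMultiplier(2)->Range(1 << 3, 1 << 12);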
// Using fixtures.
class MapFixture : public ::benchmark::Fixture
{
public:
void SetUp(const ::benchmark::State &st)
{
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
void TearDown(const ::benchmark::State &)
{
m.clear();
}
std::map<int, int> m;
};
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State &state)
{
const int size = static_cast<int>(state.range(0));
for (auto _ : state)
{
for (int i = 0; i < size; ++i)
{
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
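The hunk ends before the fixture's registration line; in this library a BENCHMARK_DEFINE_F body is completed by a matching BENCHMARK_REGISTER_F call, plausibly of this shape (the range is illustrative):
BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);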

View File

@ -4,16 +4,22 @@
#include "benchmark/benchmark.h"
#include "output_test.h"
class TestMemoryManager : public benchmark::MemoryManager
{
void Start()
{
}
void Stop(Result *result)
{
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
};
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}
@ -35,7 +41,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
int main(int argc, char *argv[])
{
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
benchmark::RegisterMemoryManager(mm.get());
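RegisterMemoryManager() installs the hooks globally, after which Start()/Stop() bracket each benchmark run so the manager can report num_allocs and max_bytes_used. A sketch of how such a main() would likely finish (not the verbatim file; unhooking with nullptr mirrors the registration call):
    RunOutputTests(argc, argv);
    benchmark::RegisterMemoryManager(nullptr); // detach the manager again
}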

View File

@ -5,7 +5,8 @@
#include <set>
#include <vector>
class MultipleRangesFixture : public ::benchmark::Fixture
{
public:
MultipleRangesFixture()
: expectedValues({{1, 3, 5},
@ -26,11 +27,13 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
{2, 7, 5},
{2, 7, 8},
{2, 7, 15},
{7, 6, 3}})
{
}
void SetUp(const ::benchmark::State &state)
{
std::vector<int64_t> ranges = {state.range(0), state.range(1), state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
@ -39,20 +42,26 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~MultipleRangesFixture()
{
if (actualValues != expectedValues)
{
std::cout << "EXPECTED\n";
for (auto v : expectedValues)
{
std::cout << "{";
for (int64_t iv : v)
{
std::cout << iv << ", ";
}
std::cout << "}\n";
}
std::cout << "ACTUAL\n";
for (auto v : actualValues)
{
std::cout << "{";
for (int64_t iv : v)
{
std::cout << iv << ", ";
}
std::cout << "}\n";
@ -64,10 +73,13 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
std::set<std::vector<int64_t>> actualValues;
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State &state)
{
for (auto _ : state)
{
int64_t product = state.range(0) * state.range(1) * state.range(2);
for (int64_t x = 0; x < product; x++)
{
benchmark::DoNotOptimize(x);
}
}
@ -78,17 +90,21 @@ BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
->Ranges({{1, 2}, {3, 7}, {5, 15}})
->Args({7, 6, 3});
void BM_CheckDefaultArgument(benchmark::State &state)
{
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
for (auto _ : state)
{
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State &st)
{
for (auto _ : st)
{
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});

View File

@ -7,16 +7,19 @@
#endif
#include <cassert>
void BM_basic(benchmark::State &state)
{
for (auto _ : state)
{
}
}
void BM_basic_slow(benchmark::State &state)
{
std::chrono::milliseconds sleep_duration(state.range(0));
for (auto _ : state)
{
std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
}
@ -37,8 +40,7 @@ BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
BENCHMARK(BM_basic)
->RangeMultiplier(std::numeric_limits<int>::max())
->Range(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max());
// Negative ranges
BENCHMARK(BM_basic)->Range(-64, -1);
@ -46,15 +48,18 @@ BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
void CustomArgs(benchmark::internal::Benchmark *b)
{
for (int i = 0; i < 10; ++i)
{
b->Arg(i);
}
}
BENCHMARK(BM_basic)->Apply(CustomArgs);
void BM_explicit_iteration_count(benchmark::State &state)
{
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
@ -68,7 +73,6 @@ void BM_explicit_iteration_count(benchmark::State& state) {
++actual_iterations;
assert(state.iterations() == state.max_iterations);
assert(state.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
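For reference, Iterations(42) pins the iteration count and skips the usual calibration runs, which is why the assertions on state.max_iterations above hold. A minimal sketch of the same knob (benchmark name illustrative):
void BM_PinnedIterations(benchmark::State &state)
{
    for (auto _ : state)
    {
        // Executes exactly 42 times; no automatic scaling pass.
    }
}
BENCHMARK(BM_PinnedIterations)->Iterations(42);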

View File

@ -18,17 +18,18 @@
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
enum MatchRules
{
MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches
// the regex
};
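In practice the three rules combine like this (patterns illustrative, not from this header):
// MR_Not entries guard the lines leading up to the next anchor; MR_Default
// scans forward for the first matching line; MR_Next must match immediately.
ADD_CASES(TC_ConsoleOut, {{".*BM_example/forbidden ", MR_Not},
                          {"^BM_example_mean %console_report$"},
                          {"^BM_example_median %console_report$", MR_Next}});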
struct TestCase
{
TestCase(std::string re, int rule = MR_Default);
std::string regex_str;
@ -37,7 +38,8 @@ struct TestCase {
std::shared_ptr<benchmark::Regex> regex;
};
enum TestCaseID
{
TC_ConsoleOut,
TC_ConsoleErr,
TC_JSONOut,
@ -54,8 +56,7 @@ int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regex's
// See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il);
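A typical call, mirroring how the reporter tests below use it (the placeholder name is illustrative):
// Makes "%RMS" usable inside later test-case regexes.
SET_SUBSTITUTIONS({{"%RMS", "[ ]*[0-9]+ %"}});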
// Run all output tests.
void RunOutputTests(int argc, char *argv[]);
@ -89,19 +90,26 @@ size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
// Class holding the results of a benchmark.
// It is passed in calls to checker functions.
struct Results
{
// the benchmark name
std::string name;
// the benchmark fields
std::map<std::string, std::string> values;
Results(const std::string &n) : name(n)
{
}
int NumThreads() const;
double NumIterations() const;
typedef enum
{
kCpuTime,
kRealTime
} BenchmarkTime;
// get cpu_time or real_time in seconds
double GetTime(BenchmarkTime which) const;
@ -109,39 +117,42 @@ struct Results {
// get the real_time duration of the benchmark in seconds.
// it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy.
double DurationRealTime() const
{
return NumIterations() * GetTime(kRealTime);
}
// get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const
{
return NumIterations() * GetTime(kCpuTime);
}
// get the string for a result by name, or nullptr if the name
// is not found
const std::string *Get(const char *entry_name) const
{
auto it = values.find(entry_name);
if (it == values.end())
return nullptr;
return &it->second;
}
// get a result by name, parsed as a specific type.
// NOTE: for counters, use GetCounterAs instead.
template <class T> T GetAs(const char *entry_name) const;
// counters are written as doubles, so they have to be read first
// as a double, and only then converted to the asked type.
template <class T> T GetCounterAs(const char *entry_name) const
{
double dval = GetAs<double>(entry_name);
T tval = static_cast<T>(dval);
return tval;
}
};
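Combined with AddChecker() above, a subscriber receives one Results per matching benchmark run; a sketch of a check function (the "BM_foo" pattern is a placeholder):
// Assert that every run matching "BM_foo" performed at least one iteration.
size_t dummy_foo_check = AddChecker("BM_foo", [](Results const &r) {
    CHECK(r.NumIterations() >= 1);
});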
template <class T> T Results::GetAs(const char *entry_name) const
{
auto *sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty());
std::stringstream ss;
@ -204,7 +215,8 @@ T Results::GetAs(const char* entry_name) const {
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //
namespace
{
const char *const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";

View File

@ -16,8 +16,10 @@
// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
// ========================================================================= //
namespace internal
{
namespace
{
using TestCaseList = std::vector<TestCase>;
@ -28,14 +30,16 @@ using TestCaseList = std::vector<TestCase>;
// Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList &GetTestCaseList(TestCaseID ID)
{
// Uses function-local statics to ensure initialization occurs
// before first use.
static TestCaseList lists[TC_NumID];
return lists[ID];
}
SubMap &GetSubstitutions()
{
// Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
@ -69,13 +73,16 @@ SubMap& GetSubstitutions() {
return map;
}
std::string PerformSubstitutions(std::string source)
{
SubMap const &subs = GetSubstitutions();
using SizeT = std::string::size_type;
for (auto const &KV : subs)
{
SizeT pos;
SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos)
{
next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second);
}
@ -83,44 +90,47 @@ std::string PerformSubstitutions(std::string source) {
return source;
}
void CheckCase(std::stringstream &remaining_output, TestCase const &TC, TestCaseList const &not_checks)
{
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false)
{
CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first)
{
first_line = line;
on_first = false;
}
for (const auto &NC : not_checks)
{
CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \""
<< NC.regex_str << "\""
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
if (TC.regex->Match(line))
return;
CHECK(TC.match_rule != MR_Next) << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str
<< "\" was found"
<< "End of output reached before match for regex \"" << TC.regex_str << "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
void CheckCases(TestCaseList const &checks, std::stringstream &output)
{
std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i)
{
const auto &TC = checks[i];
if (TC.match_rule == MR_Not)
{
not_checks.push_back(TC);
continue;
}
@ -129,18 +139,21 @@ void CheckCases(TestCaseList const& checks, std::stringstream& output) {
}
}
class TestReporter : public benchmark::BenchmarkReporter
{
public:
TestReporter(std::vector<benchmark::BenchmarkReporter *> reps) : reporters_(reps)
{
}
virtual bool ReportContext(const Context &context)
{
bool last_ret = false;
bool first = true;
for (auto rep : reporters_)
{
bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret) << "Reports return different values for ReportContext";
first = false;
last_ret = new_ret;
}
@ -148,11 +161,15 @@ class TestReporter : public benchmark::BenchmarkReporter {
return last_ret;
}
void ReportRuns(const std::vector<Run> &report)
{
for (auto rep : reporters_)
rep->ReportRuns(report);
}
void Finalize()
{
for (auto rep : reporters_)
rep->Finalize();
}
private:
@ -166,15 +183,19 @@ class TestReporter : public benchmark::BenchmarkReporter {
// -------------------------- Results checking ----------------------------- //
// ========================================================================= //
namespace internal
{
// Utility class to manage subscribers for checking benchmark results.
// It works by parsing the CSV output to read the results.
class ResultsChecker
{
public:
struct PatternAndFn : public TestCase
{ // reusing TestCase for its regexes
PatternAndFn(const std::string &rx, ResultsCheckFn fn_) : TestCase(rx), fn(fn_)
{
}
ResultsCheckFn fn;
};
@ -195,35 +216,41 @@ class ResultsChecker {
// store the static ResultsChecker in a function to prevent initialization
// order problems
ResultsChecker &GetResultsChecker()
{
static ResultsChecker rc;
return rc;
}
// add a results checker for a benchmark
void ResultsChecker::Add(const std::string &entry_pattern, ResultsCheckFn fn)
{
check_patterns.emplace_back(entry_pattern, fn);
}
// check the results of all subscribed benchmarks
void ResultsChecker::CheckResults(std::stringstream &output)
{
// first reset the stream to the start
{
auto start = std::stringstream::pos_type(0);
// clear before calling tellg()
output.clear();
// seek to zero only when needed
if (output.tellg() > start)
output.seekg(start);
// and just in case
output.clear();
}
// now go over every line and publish it to the ResultsChecker
std::string line;
bool on_first = true;
while (output.eof() == false)
{
CHECK(output.good());
std::getline(output, line);
if (on_first)
{
SetHeader_(line); // this is important
on_first = false;
continue;
@ -231,14 +258,19 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
SetValues_(line);
}
// finally we can call the subscribed check functions
for (const auto &p : check_patterns)
{
VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for (const auto &r : results)
{
if (!p.regex->Match(r.name))
{
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue;
}
else
{
VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
}
VLOG(1) << "Checking results of " << r.name << ": ... \n";
@ -249,56 +281,71 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
}
// prepare for the names in this header
void ResultsChecker::SetHeader_(const std::string &csv_header)
{
field_names = SplitCsv_(csv_header);
}
// set the values for a benchmark
void ResultsChecker::SetValues_(const std::string &entry_csv_line)
{
if (entry_csv_line.empty())
return; // some lines are empty
CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto &entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i)
{
entry.values[field_names[i]] = vals[i];
}
}
// a quick'n'dirty csv splitter (eliminating quotes)
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string &line)
{
std::vector<std::string> out;
if (line.empty())
return out;
if (!field_names.empty())
out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while (pos != line.npos)
{
CHECK(curr > 0);
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
if (line[prev] == '"')
++prev;
if (line[curr - 1] == '"')
--curr;
out.push_back(line.substr(prev, curr - prev));
prev = pos + 1;
pos = line.find_first_of(',', pos + 1);
curr = pos;
}
curr = line.size();
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
if (line[prev] == '"')
++prev;
if (line[curr - 1] == '"')
--curr;
out.push_back(line.substr(prev, curr - prev));
return out;
}
} // end namespace internal
size_t AddChecker(const char *bm_name, ResultsCheckFn fn)
{
auto &rc = internal::GetResultsChecker();
rc.Add(bm_name, fn);
return rc.results.size();
}
int Results::NumThreads() const
{
auto pos = name.find("/threads:");
if (pos == name.npos)
return 1;
auto end = name.find('/', pos + 9);
std::stringstream ss;
ss << name.substr(pos + 9, end);
@ -308,25 +355,36 @@ int Results::NumThreads() const {
return num;
}
double Results::NumIterations() const
{
return GetAs<double>("iterations");
}
double Results::GetTime(BenchmarkTime which) const
{
CHECK(which == kCpuTime || which == kRealTime);
const char *which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str);
auto unit = Get("time_unit");
CHECK(unit);
if (*unit == "ns") {
if (*unit == "ns")
{
return val * 1.e-9;
} else if (*unit == "us") {
}
else if (*unit == "us")
{
return val * 1.e-6;
} else if (*unit == "ms") {
}
else if (*unit == "ms")
{
return val * 1.e-3;
} else if (*unit == "s") {
}
else if (*unit == "s")
{
return val;
}
else
{
CHECK(1 == 0) << "unknown time unit: " << *unit;
return 0;
}
@ -337,38 +395,41 @@ double Results::GetTime(BenchmarkTime which) const {
// ========================================================================= //
TestCase::TestCase(std::string re, int rule)
: regex_str(std::move(re)), match_rule(rule), substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>())
{
std::string err_str;
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
<< "\""
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex << "\""
<< "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str;
}
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il)
{
auto &L = internal::GetTestCaseList(ID);
L.insert(L.end(), il);
return 0;
}
int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il)
{
auto &subs = internal::GetSubstitutions();
for (auto KV : il)
{
bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second);
for (auto &EKV : subs)
{
if (EKV.first == KV.first)
{
EKV.second = std::move(KV.second);
exists = true;
break;
}
}
if (!exists)
subs.push_back(std::move(KV));
}
return 0;
}
@ -379,14 +440,16 @@ int SetSubstitutions(
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
void RunOutputTests(int argc, char *argv[])
{
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest
{
const char *name;
std::vector<TestCase> &output_cases;
std::vector<TestCase> &error_cases;
@ -394,20 +457,17 @@ void RunOutputTests(int argc, char* argv[]) {
std::stringstream out_stream;
std::stringstream err_stream;
ReporterTest(const char *n, std::vector<TestCase> &out_tc, std::vector<TestCase> &err_tc,
benchmark::BenchmarkReporter &br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br)
{
reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream);
}
} TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut),
GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr),
JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
CSVR},
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), CSVR},
};
// Create the test reporter and run the benchmarks.
@ -415,7 +475,8 @@ void RunOutputTests(int argc, char* argv[]) {
internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto &rep_test : TestCases)
{
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n";
@ -441,8 +502,10 @@ void RunOutputTests(int argc, char* argv[]) {
#pragma GCC diagnostic pop
#endif
int SubstrCnt(const std::string &haystack, const std::string &pat)
{
if (pat.length() == 0)
return 0;
int count = 0;
for (size_t offset = haystack.find(pat); offset != std::string::npos;
offset = haystack.find(pat, offset + pat.length()))
@ -450,37 +513,43 @@ int SubstrCnt(const std::string& haystack, const std::string& pat) {
return count;
}
static char ToHex(int ch)
{
return ch < 10 ? static_cast<char>('0' + ch) : static_cast<char>('a' + (ch - 10));
}
static char RandomHexChar()
{
static std::mt19937 rd{std::random_device{}()};
static std::uniform_int_distribution<int> mrand{0, 15};
return ToHex(mrand(rd));
}
static std::string GetRandomFileName()
{
std::string model = "test.%%%%%%";
for (auto &ch : model)
{
if (ch == '%')
ch = RandomHexChar();
}
return model;
}
static bool FileExists(std::string const &name)
{
std::ifstream in(name.c_str());
return in.good();
}
static std::string GetTempFileName()
{
// This function attempts to avoid race conditions where two tests
// create the same file at the same time. However, it still introduces races
// similar to tmpnam.
int retries = 3;
while (--retries)
{
std::string name = GetRandomFileName();
if (!FileExists(name))
return name;
@ -489,7 +558,8 @@ static std::string GetTempFileName() {
std::abort();
}
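A less racy alternative would let the kernel pick the name, e.g. via mkstemp; a POSIX-only sketch (the header requirement is an assumption here, and portability to Windows is presumably why the code above does not do this):
#include <unistd.h> // POSIX only: mkstemp() and close()
static std::string GetTempFileNamePosix()
{
    char name[] = "test.XXXXXX";
    int fd = mkstemp(name); // atomically creates and opens a unique file
    if (fd == -1)
        std::abort();
    close(fd);
    return name;
}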
std::string GetFileReporterOutput(int argc, char *argv[])
{
std::vector<char *> new_argv(argv, argv + argc);
assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
@ -507,8 +577,7 @@ std::string GetFileReporterOutput(int argc, char* argv[]) {
// Read the output back from the file, and delete the file.
std::ifstream tmp_stream(tmp_file_name);
std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)), std::istreambuf_iterator<char>());
std::remove(tmp_file_name.c_str());
return output;

View File

@ -6,11 +6,14 @@
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace
{
class TestReporter : public benchmark::ConsoleReporter
{
public:
virtual void ReportRuns(const std::vector<Run> &report)
{
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
@ -18,17 +21,22 @@ class TestReporter : public benchmark::ConsoleReporter {
std::vector<Run> all_runs_;
};
struct TestCase
{
std::string name;
const char *label;
// Note: not explicit as we rely on it being converted through ADD_CASES.
TestCase(const char *xname) : TestCase(xname, nullptr)
{
}
TestCase(const char *xname, const char *xlabel) : name(xname), label(xlabel)
{
}
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const &run) const
{
// clang-format off
CHECK(name == run.benchmark_name()) << "expected " << name << " got "
<< run.benchmark_name();
@ -44,8 +52,10 @@ struct TestCase {
std::vector<TestCase> ExpectedResults;
int AddCases(std::initializer_list<TestCase> const &v)
{
for (auto N : v)
{
ExpectedResults.push_back(N);
}
return 0;
@ -62,13 +72,14 @@ typedef benchmark::internal::Benchmark* ReturnVal;
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark("BM_function_manual_registration", BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//
@ -78,14 +89,16 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State &st, const char *label)
{
for (auto _ : st)
{
}
st.SetLabel(label);
}
int RegisterFromFunction()
{
std::pair<const char *, const char *> cases[] = {{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
for (auto const &c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0;
@ -99,14 +112,18 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
// Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------//
struct CustomFixture
{
void operator()(benchmark::State &st)
{
for (auto _ : st)
{
}
}
};
void TestRegistrationAtRuntime()
{
#ifdef BENCHMARK_HAS_CXX11
{
CustomFixture fx;
@ -118,7 +135,8 @@ void TestRegistrationAtRuntime() {
{
const char *x = "42";
auto capturing_lam = [=](benchmark::State &st) {
for (auto _ : st)
{
}
st.SetLabel(x);
};
@ -130,7 +148,8 @@ void TestRegistrationAtRuntime() {
// Test that all benchmarks, registered either during static init or at runtime,
// are run and that the results are passed to the reporter.
void RunTestOne()
{
TestRegistrationAtRuntime();
TestReporter test_reporter;
@ -139,7 +158,8 @@ void RunTestOne() {
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
@ -150,9 +170,9 @@ void RunTestOne() {
// Test that ClearRegisteredBenchmarks() clears all previously registered
// benchmarks.
// Also test that new benchmarks can be registered and ran afterwards.
void RunTestTwo()
{
assert(ExpectedResults.size() != 0 && "must have at least one registered benchmark");
ExpectedResults.clear();
benchmark::ClearRegisteredBenchmarks();
@ -168,7 +188,8 @@ void RunTestTwo() {
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
@ -176,7 +197,8 @@ void RunTestTwo() {
assert(EB == ExpectedResults.end());
}
int main(int argc, char *argv[])
{
benchmark::Initialize(&argc, argv);
RunTestOne();

View File

@ -10,21 +10,23 @@
// reporter in the presence of ReportAggregatesOnly().
// We do not care about console output, the normal tests check that already.
void BM_SummaryRepeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
int main(int argc, char *argv[])
{
const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
{
std::cout << "Precondition mismatch. Expected to only find three "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "

View File

@ -9,38 +9,34 @@
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
static int AddContextCases() {
AddCases(TC_ConsoleErr,
ADD_CASES(TC_ConsoleOut,
{{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, {"^[-]+$", MR_Next}});
static int AddContextCases()
{
AddCases(TC_ConsoleErr, {
{"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut, {{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}});
auto const &Info = benchmark::CPUInfo::Get();
auto const &Caches = Info.caches;
if (!Caches.empty())
{
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
for (size_t I = 0; I < Caches.size(); ++I)
{
std::string num_caches_str = Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
AddCases(TC_ConsoleErr, {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next},
@ -50,9 +46,9 @@ static int AddContextCases() {
}
AddCases(TC_JSONOut, {{"],$"}});
auto const &LoadAvg = Info.load_avg;
if (!LoadAvg.empty())
{
AddCases(TC_ConsoleErr, {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0;
@ -64,8 +60,10 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //
void BM_basic(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_basic);
@ -88,8 +86,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //
void BM_bytes_per_second(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -117,8 +117,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //
void BM_items_per_second(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -146,8 +148,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //
void BM_label(benchmark::State &state)
{
for (auto _ : state)
{
}
state.SetLabel("some label");
}
@ -173,9 +177,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
void BM_error(benchmark::State &state)
{
state.SkipWithError("message");
for (auto _ : state)
{
}
}
BENCHMARK(BM_error);
@ -196,8 +202,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// //
// ========================================================================= //
void BM_no_arg_name(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
@ -214,8 +222,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ------------------------ Testing Arg Name Output ----------------------- //
// ========================================================================= //
void BM_arg_name(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
@ -232,15 +242,15 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //
void BM_arg_names(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -252,44 +262,46 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //
void BM_BigArgs(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
{"^BM_BigArgs/2147483648 %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, {"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
{"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
{"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, {"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
// need two repetitions min to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
@ -334,8 +346,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, mean and median are the same, so let's repeat...
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
@ -388,8 +399,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// median differs between even/odd number of repetitions, so just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
@ -451,8 +461,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
@ -466,19 +478,18 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
@ -508,19 +519,18 @@ ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
// Test that non-aggregate data is not displayed.
// NOTE: this test is kinda bad. We are only testing the display output;
// we don't check that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
@ -542,31 +552,26 @@ ADD_CASES(TC_JSONOut,
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_RepeatTimeUnit)->Repetitions(3)->ReportAggregatesOnly()->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}});
ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
@ -591,8 +596,7 @@ ADD_CASES(TC_JSONOut,
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
@ -601,11 +605,11 @@ ADD_CASES(TC_CSVOut,
// -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= //
const auto UserStatistics = [](const std::vector<double> &v) { return v.back(); };
void BM_UserStats(benchmark::State &state)
{
for (auto _ : state)
{
state.SetIterationTime(150 / 10e8);
}
}
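The registration that wires UserStatistics into the run lies outside this hunk; given the expectations below (iterations:5, repeats:3, manual_time, and an aggregate whose name is the empty string), it plausibly reads:
BENCHMARK(BM_UserStats)
    ->Repetitions(3)
    ->Iterations(5)
    ->UseManualTime()
    ->ComputeStatistics("", UserStatistics);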
@ -633,11 +637,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
@ -645,8 +646,7 @@ ADD_CASES(
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
@ -654,8 +654,7 @@ ADD_CASES(
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
@ -663,8 +662,7 @@ ADD_CASES(
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
@ -672,8 +670,7 @@ ADD_CASES(
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
@ -681,8 +678,7 @@ ADD_CASES(
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
@ -690,17 +686,14 @@ ADD_CASES(
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
@ -733,9 +726,11 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
// -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= //
void BM_CSV_Format(benchmark::State& state) {
void BM_CSV_Format(benchmark::State &state)
{
state.SkipWithError("\"freedom\"");
for (auto _ : state) {
for (auto _ : state)
{
}
}
BENCHMARK(BM_CSV_Format);
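This case exercises the CSV escaping rule: the error message "freedom" (quotes included) must round-trip into the CSV column as """freedom""", i.e. the field is wrapped in quotes and embedded quotes are doubled. A minimal sketch of that quoting rule, written fresh here for illustration rather than copied from the library:

#include <cassert>
#include <string>

// RFC 4180 style escaping: wrap the field in quotes and double
// any quote characters inside it.
std::string CsvEscape(const std::string &field)
{
    std::string out = "\"";
    for (char c : field)
    {
        out += c;
        if (c == '"')
            out += '"';  // double embedded quotes
    }
    return out + "\"";
}

int main()
{
    assert(CsvEscape("\"freedom\"") == "\"\"\"freedom\"\"\"");
    return 0;
}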
@ -745,4 +740,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -6,40 +6,52 @@
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
namespace
{
class TestReporter : public benchmark::ConsoleReporter {
class TestReporter : public benchmark::ConsoleReporter
{
public:
virtual bool ReportContext(const Context& context) {
virtual bool ReportContext(const Context &context)
{
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
virtual void ReportRuns(const std::vector<Run> &report)
{
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
TestReporter() {}
virtual ~TestReporter() {}
TestReporter()
{
}
virtual ~TestReporter()
{
}
mutable std::vector<Run> all_runs_;
};
struct TestCase {
struct TestCase
{
std::string name;
bool error_occurred;
std::string error_message;
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name())
<< "expected " << name << " got " << run.benchmark_name();
void CheckRun(Run const &run) const
{
CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred) {
if (error_occurred)
{
// CHECK(run.iterations == 0);
} else {
}
else
{
CHECK(run.iterations != 0);
}
}
@ -47,8 +59,10 @@ struct TestCase {
std::vector<TestCase> ExpectedResults;
int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
for (auto TC : v) {
int AddCases(const char *base_name, std::initializer_list<TestCase> const &v)
{
for (auto TC : v)
{
TC.name = base_name + TC.name;
ExpectedResults.push_back(std::move(TC));
}
@ -61,47 +75,59 @@ int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
} // end namespace
void BM_error_no_running(benchmark::State& state) {
void BM_error_no_running(benchmark::State &state)
{
state.SkipWithError("error message");
}
BENCHMARK(BM_error_no_running);
ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
void BM_error_before_running(benchmark::State& state) {
void BM_error_before_running(benchmark::State &state)
{
state.SkipWithError("error message");
while (state.KeepRunning()) {
while (state.KeepRunning())
{
assert(false);
}
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
void BM_error_before_running_batch(benchmark::State& state) {
void BM_error_before_running_batch(benchmark::State &state)
{
state.SkipWithError("error message");
while (state.KeepRunningBatch(17)) {
while (state.KeepRunningBatch(17))
{
assert(false);
}
}
BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
void BM_error_before_running_range_for(benchmark::State& state) {
void BM_error_before_running_range_for(benchmark::State &state)
{
state.SkipWithError("error message");
for (auto _ : state) {
for (auto _ : state)
{
assert(false);
}
}
BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
void BM_error_during_running(benchmark::State& state) {
void BM_error_during_running(benchmark::State &state)
{
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
while (state.KeepRunning())
{
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
{
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
} else {
}
else
{
state.PauseTiming();
state.ResumeTiming();
}
@ -117,12 +143,15 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
void BM_error_during_running_ranged_for(benchmark::State& state) {
void BM_error_during_running_ranged_for(benchmark::State &state)
{
assert(state.max_iterations > 3 && "test requires at least a few iterations");
bool first_iter = true;
// NOTE: Users should not write the for loop explicitly.
for (auto It = state.begin(), End = state.end(); It != End; ++It) {
if (state.range(0) == 1) {
for (auto It = state.begin(), End = state.end(); It != End; ++It)
{
if (state.range(0) == 1)
{
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
@ -135,11 +164,12 @@ void BM_error_during_running_ranged_for(benchmark::State& state) {
}
BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
ADD_CASES("BM_error_during_running_ranged_for",
{{"/1/iterations:5", true, "error message"},
{"/2/iterations:5", false, ""}});
{{"/1/iterations:5", true, "error message"}, {"/2/iterations:5", false, ""}});
void BM_error_after_running(benchmark::State& state) {
for (auto _ : state) {
void BM_error_after_running(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
if (state.thread_index <= (state.threads / 2))
@ -151,15 +181,20 @@ ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
{"/threads:4", true, "error message"},
{"/threads:8", true, "error message"}});
void BM_error_while_paused(benchmark::State& state) {
void BM_error_while_paused(benchmark::State &state)
{
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
while (state.KeepRunning())
{
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
{
assert(first_iter);
first_iter = false;
state.PauseTiming();
state.SkipWithError("error message");
} else {
}
else
{
state.PauseTiming();
state.ResumeTiming();
}
@ -175,7 +210,8 @@ ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
int main(int argc, char* argv[]) {
int main(int argc, char *argv[])
{
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
@ -184,7 +220,8 @@ int main(int argc, char* argv[]) {
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;

View File

@ -15,14 +15,16 @@ extern "C" {
using benchmark::State;
// CHECK-LABEL: test_for_auto_loop:
extern "C" int test_for_auto_loop() {
extern "C" int test_for_auto_loop()
{
State &S = GetState();
int x = 42;
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK-NEXT: testq %rbx, %rbx
// CHECK-NEXT: je [[LOOP_END:.*]]
for (auto _ : S) {
for (auto _ : S)
{
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx
// CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
@ -38,13 +40,15 @@ extern "C" int test_for_auto_loop() {
}
// CHECK-LABEL: test_while_loop:
extern "C" int test_while_loop() {
extern "C" int test_while_loop()
{
State &S = GetState();
int x = 42;
// CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
// CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
while (S.KeepRunning()) {
while (S.KeepRunning())
{
// CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
// CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
// CHECK: movq %[[IREG]], [[DEST:.*]]

View File

@ -5,24 +5,27 @@
#include "../src/statistics.h"
#include "gtest/gtest.h"
namespace {
TEST(StatisticsTest, Mean) {
namespace
{
TEST(StatisticsTest, Mean)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
}
TEST(StatisticsTest, Median) {
TEST(StatisticsTest, Median)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
}
TEST(StatisticsTest, StdDev) {
TEST(StatisticsTest, StdDev)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
1.151086443322134);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), 1.151086443322134);
}
} // end namespace
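These expectations use the sample standard deviation (divide by n - 1): for {2.5, 2.4, 3.3, 4.2, 5.1} the mean is 3.5, the squared deviations sum to 5.3, and sqrt(5.3 / 4) ≈ 1.151086, matching the value asserted above. A self-contained sketch of that formula (not the library's internal implementation):

#include <cassert>
#include <cmath>
#include <vector>

// Sample standard deviation: sqrt(sum((x - mean)^2) / (n - 1)).
double SampleStdDev(const std::vector<double> &v)
{
    double mean = 0.0;
    for (double x : v)
        mean += x;
    mean /= v.size();
    double sq_sum = 0.0;
    for (double x : v)
        sq_sum += (x - mean) * (x - mean);
    return std::sqrt(sq_sum / (v.size() - 1));
}

int main()
{
    assert(std::fabs(SampleStdDev({1, 2, 3}) - 1.0) < 1e-12);  // matches StdDev({1, 2, 3}) == 1.0
    return 0;
}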

View File

@ -2,12 +2,14 @@
// string_util_test - Unit tests for src/string_util.cc
//===---------------------------------------------------------------------===//
#include "../src/string_util.h"
#include "../src/internal_macros.h"
#include "../src/string_util.h"
#include "gtest/gtest.h"
namespace {
TEST(StringUtilTest, stoul) {
namespace
{
TEST(StringUtilTest, stoul)
{
{
size_t pos = 0;
EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
@ -68,12 +70,10 @@ TEST(StringUtilTest, stoul) {
#endif
}
TEST(StringUtilTest, stoi) {
{
size_t pos = 0;
TEST(StringUtilTest, stoi)
{
{
size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
@ -116,7 +116,8 @@ TEST(StringUtilTest, stoi) {
#endif
}
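benchmark::stoi mirrors std::stoi: it parses a leading integer and reports through the pos out-parameter how many characters it consumed. The same contract, demonstrated with the standard function so the sketch stays independent of benchmark internals:

#include <cassert>
#include <string>

int main()
{
    std::size_t pos = 0;
    int value = std::stoi("-17abc", &pos);
    assert(value == -17);
    assert(pos == 3);  // "-17" is three characters
    return 0;
}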
TEST(StringUtilTest, stod) {
TEST(StringUtilTest, stod)
{
{
size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));

View File

@ -4,22 +4,28 @@
#include <cassert>
#include <memory>
template <typename T>
class MyFixture : public ::benchmark::Fixture {
template <typename T> class MyFixture : public ::benchmark::Fixture
{
public:
MyFixture() : data(0) {}
MyFixture() : data(0)
{
}
T data;
};
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
for (auto _ : st) {
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st)
{
for (auto _ : st)
{
data += 1;
}
}
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
for (auto _ : st) {
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State &st)
{
for (auto _ : st)
{
data += 1.0;
}
}
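BENCHMARK_TEMPLATE_F both defines and registers Foo, whereas BENCHMARK_TEMPLATE_DEFINE_F only defines Bar; Bar runs only once it is registered separately, which also lets arguments be attached. A sketch of that registration step (the ->Threads(2) modifier is just an example):

// Registers the Bar benchmark defined above; without a
// BENCHMARK_REGISTER_F line it would never be run.
BENCHMARK_REGISTER_F(MyFixture, Bar)->Threads(2);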

View File

@ -7,8 +7,7 @@
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES(
TC_ConsoleOut,
ADD_CASES(TC_ConsoleOut,
{
// keeping these lines long improves readability, so:
// clang-format off
@ -55,8 +54,10 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
void BM_Counters_Tabular(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
@ -69,8 +70,7 @@ void BM_Counters_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
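state.counters acts as a map from counter name to benchmark::Counter, so several counters can be inserted in one call, each with its own flags. A minimal sketch of the pattern used in the body above (the counter names and flags here are illustrative):

#include "benchmark/benchmark.h"

static void BM_WithCounters(benchmark::State &state)
{
    for (auto _ : state)
    {
    }
    namespace bm = benchmark;
    // kAvgThreads averages the value across threads;
    // kDefaults reports the value as-is.
    state.counters.insert({
        {"Foo", {1, bm::Counter::kAvgThreads}},
        {"Bar", {2, bm::Counter::kDefaults}},
    });
}
BENCHMARK(BM_WithCounters);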
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -91,7 +91,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) {
void CheckTabular(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
@ -105,8 +106,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
// -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= //
void BM_CounterRates_Tabular(benchmark::State& state) {
for (auto _ : state) {
void BM_CounterRates_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -121,10 +124,8 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
MR_Next},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
@ -144,7 +145,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) {
void CheckTabularRate(Results const &e)
{
double t = e.DurationCPUTime();
CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
@ -153,16 +155,17 @@ void CheckTabularRate(Results const& e) {
CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
&CheckTabularRate);
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", &CheckTabularRate);
// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
for (auto _ : state) {
void BM_CounterSet0_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
@ -172,8 +175,7 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -191,7 +193,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const& e) {
void CheckSet0(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
@ -199,8 +202,10 @@ void CheckSet0(Results const& e) {
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
// again.
void BM_CounterSet1_Tabular(benchmark::State& state) {
for (auto _ : state) {
void BM_CounterSet1_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
@ -210,8 +215,7 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -229,7 +233,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const& e) {
void CheckSet1(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
@ -241,8 +246,10 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
// ========================================================================= //
// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
for (auto _ : state) {
void BM_CounterSet2_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
@ -252,8 +259,7 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
});
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -271,7 +277,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const& e) {
void CheckSet2(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
@ -282,4 +289,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -22,15 +22,16 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_Simple(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_Simple(benchmark::State &state)
{
for (auto _ : state)
{
}
state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations();
}
BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
{"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -47,7 +48,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const& e) {
void CheckSimple(Results const &e)
{
double its = e.NumIterations();
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
// check that the value of bar is within 0.1% of the expected value
@ -59,11 +61,14 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //
namespace {
namespace
{
int num_calls1 = 0;
}
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_WithBytesAndItemsPSec(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -76,8 +81,7 @@ BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
"bar=%hrfloat bytes_per_second=%hrfloat/s "
"foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -96,7 +100,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
"%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const& e) {
void CheckBytesAndItemsPSec(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
@ -104,15 +109,16 @@ void CheckBytesAndItemsPSec(Results const& e) {
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
&CheckBytesAndItemsPSec);
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", &CheckBytesAndItemsPSec);
// ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= //
void BM_Counters_Rate(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_Rate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -121,9 +127,7 @@ void BM_Counters_Rate(benchmark::State& state) {
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
BENCHMARK(BM_Counters_Rate);
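kIsRate tells the reporter to divide the final counter value by the benchmark's CPU time, which is exactly what CheckRate below asserts (foo = 1 is reported as 1/t). Worked numbers under an assumed CPU time:

#include <cassert>

int main()
{
    double foo = 1.0;  // counter value, as in BM_Counters_Rate
    double t = 0.25;   // hypothetical measured CPU time in seconds
    double reported = foo / t;  // kIsRate: value per second
    assert(reported == 4.0);    // printed as "4/s"
    return 0;
}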
ADD_CASES(
TC_ConsoleOut,
{{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
{"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -140,7 +144,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const& e) {
void CheckRate(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
@ -152,8 +157,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ----------------------- Inverted Counters Output ------------------------ //
// ========================================================================= //
void BM_Invert(benchmark::State& state) {
for (auto _ : state) {
void BM_Invert(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -162,8 +169,7 @@ void BM_Invert(benchmark::State& state) {
state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
}
BENCHMARK(BM_Invert);
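kInvert reports the reciprocal of the counter: bar = 10000 comes back as 1/10000 = 0.0001 (hence the "u" micro suffix in the console line), and a small counter comes back large (the "k" suffix for foo). Worked numbers:

#include <cassert>
#include <cmath>

int main()
{
    double bar = 10000.0;
    double reported = 1.0 / bar;  // kInvert: reciprocal of the value
    assert(std::fabs(reported - 0.0001) < 1e-12);  // matches CheckInvert above
    return 0;
}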
ADD_CASES(TC_ConsoleOut,
{{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
{"\"run_name\": \"BM_Invert\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -180,7 +186,8 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvert(Results const& e) {
void CheckInvert(Results const &e)
{
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
}
@ -191,22 +198,21 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
// -------------------------- //
// ========================================================================= //
void BM_Counters_InvertedRate(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_InvertedRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] =
bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["bar"] =
bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["bar"] = bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
}
BENCHMARK(BM_Counters_InvertedRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
"bar=%hrfloats foo=%hrfloats$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_InvertedRate\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_InvertedRate\",$"},
{"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -219,11 +225,11 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvertedRate(Results const& e) {
void CheckInvertedRate(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
@ -235,8 +241,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_Threads(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_Threads(benchmark::State &state)
{
for (auto _ : state)
{
}
state.counters["foo"] = 1;
state.counters["bar"] = 2;
@ -244,8 +252,7 @@ void BM_Counters_Threads(benchmark::State& state) {
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -258,12 +265,11 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const& e) {
void CheckThreads(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
}
@ -273,8 +279,10 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_AvgThreads(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
@ -283,8 +291,7 @@ void BM_Counters_AvgThreads(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
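kAvgThreads sums the counter over all threads and divides by the thread count, so when each of N threads sets foo = 1 the reported value stays 1 for every N, which is what CheckAvgThreads asserts. Worked numbers with a hypothetical thread count:

#include <cassert>

int main()
{
    int threads = 8;              // hypothetical thread count
    double per_thread_foo = 1.0;  // each thread sets foo = 1
    // kAvgThreads: sum across threads, then divide by the thread count.
    double reported = (per_thread_foo * threads) / threads;
    assert(reported == 1.0);
    return 0;
}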
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -297,24 +304,24 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) {
void CheckAvgThreads(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
&CheckAvgThreads);
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", &CheckAvgThreads);
// ========================================================================= //
// -------------------- ThreadAvgRate Counters Output ---------------------- //
// ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_AvgThreadsRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
@ -325,10 +332,8 @@ void BM_Counters_AvgThreadsRate(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
MR_Next},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
@ -344,19 +349,21 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
"threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) {
void CheckAvgThreadsRate(Results const &e)
{
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
&CheckAvgThreadsRate);
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", &CheckAvgThreadsRate);
// ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= //
void BM_Counters_IterationInvariant(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_IterationInvariant(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
@ -365,8 +372,7 @@ void BM_Counters_IterationInvariant(benchmark::State& state) {
BENCHMARK(BM_Counters_IterationInvariant);
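kIsIterationInvariant marks the value as describing a single iteration, so the reporter multiplies it by the iteration count; CheckIterationInvariant below therefore expects foo = 1 back as (approximately) the number of iterations. Worked numbers with a hypothetical iteration count:

#include <cassert>

int main()
{
    double foo = 1.0;
    double iterations = 1000.0;  // hypothetical iteration count
    // kIsIterationInvariant: multiply by the iteration count.
    double reported = foo * iterations;
    assert(reported == 1000.0);
    return 0;
}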
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_IterationInvariant\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -379,41 +385,38 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) {
void CheckIterationInvariant(Results const &e)
{
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
&CheckIterationInvariant);
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", &CheckIterationInvariant);
// ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= //
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_kIsIterationInvariantRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] =
bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
{"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
MR_Next},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
{"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
@ -429,22 +432,24 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const& e) {
void CheckIsIterationInvariantRate(Results const &e)
{
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
&CheckIsIterationInvariantRate);
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", &CheckIsIterationInvariantRate);
// ========================================================================= //
// ------------------- AvgIterations Counters Output ------------------ //
// ========================================================================= //
void BM_Counters_AvgIterations(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_AvgIterations(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
@ -453,8 +458,7 @@ void BM_Counters_AvgIterations(benchmark::State& state) {
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgIterations\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
{"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -467,11 +471,11 @@ ADD_CASES(TC_JSONOut,
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const& e) {
void CheckAvgIterations(Results const &e)
{
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
@ -483,21 +487,21 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
// ----------------- AvgIterationsRate Counters Output ---------------- //
// ========================================================================= //
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_kAvgIterationsRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
{"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
@ -514,18 +518,21 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const& e) {
void CheckAvgIterationsRate(Results const &e)
{
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
&CheckAvgIterationsRate);
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", &CheckAvgIterationsRate);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -8,27 +8,22 @@
// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //
void BM_Counters_Thousands(benchmark::State& state) {
for (auto _ : state) {
void BM_Counters_Thousands(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
{"t0_1000000DefaultBase",
bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
{"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
{"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
{"t0_1000000DefaultBase", bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
{"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
{"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
{"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
{"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
});
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
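The OneK argument only changes the display base: kIs1000 steps by factors of 1000 and kIs1024 by factors of 1024, which is why 1000000 prints as 1000k in base 1000 but as 976.562k in base 1024 (1000000 / 1024 = 976.5625). Worked numbers:

#include <cassert>
#include <cmath>

int main()
{
    double v = 1000.0 * 1000.0;
    double base1000 = v / 1000.0;  // -> 1000k (t1_1000000Base1000)
    double base1024 = v / 1024.0;  // -> 976.5625k (t2_1000000Base1024)
    assert(base1000 == 1000.0);
    assert(std::fabs(base1024 - 976.5625) < 1e-9);
    return 0;
}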
ADD_CASES(
TC_ConsoleOut,
{
ADD_CASES(TC_ConsoleOut, {
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
@ -49,8 +44,7 @@ ADD_CASES(
"]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
"t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
@ -66,8 +60,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
@ -83,8 +76,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
@ -100,8 +92,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
@ -117,8 +108,7 @@ ADD_CASES(TC_JSONOut,
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
@ -135,9 +125,7 @@ ADD_CASES(TC_JSONOut,
{"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Thousands/"
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
@ -152,13 +140,13 @@ ADD_CASES(
{"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) {
void CheckThousands(Results const &e)
{
if (e.name != "BM_Counters_Thousands/repeats:2")
return; // Do not check the aggregates!
// check that the values are within 0.01% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
@ -170,4 +158,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -40,7 +40,8 @@
#include "gtest/internal/gtest-death-test-internal.h"
namespace testing {
namespace testing
{
// This flag controls the style of death tests. Valid values are "threadsafe",
// meaning that the death test child process will re-execute the test binary
@ -51,7 +52,8 @@ GTEST_DECLARE_string_(death_test_style);
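The style can be chosen at runtime through the standard flag accessor; "threadsafe" re-executes the test binary for each death test, while the default "fast" forks directly. A one-line sketch, typically placed in main() before RUN_ALL_TESTS():

::testing::GTEST_FLAG(death_test_style) = "threadsafe";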
#if GTEST_HAS_DEATH_TEST
namespace internal {
namespace internal
{
// Returns a Boolean value indicating whether the caller is currently
// executing in the context of the death test child process. Tools such as
@ -165,32 +167,30 @@ GTEST_API_ bool InDeathTestChild();
// Asserts that a given statement causes the program to exit, with an
// integer exit status that satisfies predicate, and emitting error output
// that matches regex.
# define ASSERT_EXIT(statement, predicate, regex) \
GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
#define ASSERT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
// Like ASSERT_EXIT, but continues on to successive tests in the
// test suite, if any:
# define EXPECT_EXIT(statement, predicate, regex) \
GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
#define EXPECT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
// Asserts that a given statement causes the program to exit, either by
// explicitly exiting with a nonzero exit code or being killed by a
// signal, and emitting error output that matches regex.
# define ASSERT_DEATH(statement, regex) \
ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
#define ASSERT_DEATH(statement, regex) ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
// Like ASSERT_DEATH, but continues on to successive tests in the
// test suite, if any:
# define EXPECT_DEATH(statement, regex) \
EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
#define EXPECT_DEATH(statement, regex) EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
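A minimal usage sketch of the two macros above (the function under test and its message are hypothetical):

#include <cstdio>
#include <cstdlib>
#include "gtest/gtest.h"

// Hypothetical function under test: must terminate the process.
void CrashOnNull(const int *p)
{
    if (p == nullptr)
    {
        std::fprintf(stderr, "null pointer\n");
        std::abort();
    }
}

TEST(CrashTest, DiesOnNull)
{
    // Passes only if the statement dies and its stderr matches the regex.
    EXPECT_DEATH(CrashOnNull(nullptr), "null pointer");
}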
// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
// Tests that an exit code describes a normal exit with a given exit code.
class GTEST_API_ ExitedWithCode {
class GTEST_API_ ExitedWithCode
{
public:
explicit ExitedWithCode(int exit_code);
bool operator()(int exit_status) const;
private:
// No implementation - assignment is unsupported.
void operator=(const ExitedWithCode &other);
@ -202,10 +202,12 @@ class GTEST_API_ ExitedWithCode {
// Tests that an exit code describes an exit due to termination by a
// given signal.
// GOOGLETEST_CM0006 DO NOT DELETE
class GTEST_API_ KilledBySignal {
class GTEST_API_ KilledBySignal
{
public:
explicit KilledBySignal(int signum);
bool operator()(int exit_status) const;
private:
const int signum_;
};
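Both predicates slot into the predicate argument of ASSERT_EXIT/EXPECT_EXIT shown earlier. A minimal sketch (an empty regex matches any output):

#include <cstdlib>
#include "gtest/gtest.h"

TEST(ExitTest, ExitsWithCode3)
{
    EXPECT_EXIT(std::exit(3), ::testing::ExitedWithCode(3), "");
}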
@ -256,19 +258,15 @@ class GTEST_API_ KilledBySignal {
//
#ifdef NDEBUG
# define EXPECT_DEBUG_DEATH(statement, regex) \
GTEST_EXECUTE_STATEMENT_(statement, regex)
#define EXPECT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)
# define ASSERT_DEBUG_DEATH(statement, regex) \
GTEST_EXECUTE_STATEMENT_(statement, regex)
#define ASSERT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)
#else
# define EXPECT_DEBUG_DEATH(statement, regex) \
EXPECT_DEATH(statement, regex)
#define EXPECT_DEBUG_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
# define ASSERT_DEBUG_DEATH(statement, regex) \
ASSERT_DEATH(statement, regex)
#define ASSERT_DEBUG_DEATH(statement, regex) ASSERT_DEATH(statement, regex)
#endif // NDEBUG for EXPECT_DEBUG_DEATH
#endif // GTEST_HAS_DEATH_TEST
@ -310,15 +308,18 @@ class GTEST_API_ KilledBySignal {
// macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
#define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (::testing::internal::AlwaysTrue()) { \
GTEST_LOG_(WARNING) \
<< "Death tests are not supported on this platform.\n" \
if (::testing::internal::AlwaysTrue()) \
{ \
GTEST_LOG_(WARNING) << "Death tests are not supported on this platform.\n" \
<< "Statement '" #statement "' cannot be verified."; \
} else if (::testing::internal::AlwaysFalse()) { \
} \
else if (::testing::internal::AlwaysFalse()) \
{ \
::testing::internal::RE::PartialMatch(".*", (regex)); \
GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
terminator; \
} else \
} \
else \
::testing::Message()
// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
@ -327,15 +328,11 @@ class GTEST_API_ KilledBySignal {
// useful when you are combining death test assertions with normal test
// assertions in one test.
#if GTEST_HAS_DEATH_TEST
# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
EXPECT_DEATH(statement, regex)
# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
ASSERT_DEATH(statement, regex)
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) EXPECT_DEATH(statement, regex)
#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) ASSERT_DEATH(statement, regex)
#else
# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return)
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return )
#endif
} // namespace testing

View File

@ -55,12 +55,12 @@
#define GTEST_MAYBE_5046_
#endif
GTEST_DISABLE_MSC_WARNINGS_PUSH_(
4251 GTEST_MAYBE_5046_ /* class A needs to have dll-interface to be used by
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 GTEST_MAYBE_5046_ /* class A needs to have dll-interface to be used by
clients of class B */
/* Symbol involving type with internal linkage not defined */)
namespace testing {
namespace testing
{
// To implement a matcher Foo for type T, define:
// 1. a class FooMatcherImpl that implements the
@ -77,30 +77,40 @@ namespace testing {
// MatchResultListener is an abstract class. Its << operator can be
// used by a matcher to explain why a value matches or doesn't match.
//
class MatchResultListener {
class MatchResultListener
{
public:
// Creates a listener object with the given underlying ostream. The
// listener does not own the ostream, and does not dereference it
// in the constructor or destructor.
explicit MatchResultListener(::std::ostream* os) : stream_(os) {}
explicit MatchResultListener(::std::ostream *os) : stream_(os)
{
}
virtual ~MatchResultListener() = 0; // Makes this class abstract.
// Streams x to the underlying ostream; does nothing if the ostream
// is NULL.
template <typename T>
MatchResultListener& operator<<(const T& x) {
if (stream_ != nullptr) *stream_ << x;
template <typename T> MatchResultListener &operator<<(const T &x)
{
if (stream_ != nullptr)
*stream_ << x;
return *this;
}
// Returns the underlying ostream.
::std::ostream* stream() { return stream_; }
::std::ostream *stream()
{
return stream_;
}
// Returns true if and only if the listener is interested in an explanation
// of the match result. A matcher's MatchAndExplain() method can use
// this information to avoid generating the explanation when no one
// intends to hear it.
bool IsInterested() const { return stream_ != nullptr; }
bool IsInterested() const
{
return stream_ != nullptr;
}
private:
::std::ostream *const stream_;
@ -108,14 +118,18 @@ class MatchResultListener {
GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener);
};
inline MatchResultListener::~MatchResultListener() {
inline MatchResultListener::~MatchResultListener()
{
}
// An instance of a subclass of this knows how to describe itself as a
// matcher.
class MatcherDescriberInterface {
class MatcherDescriberInterface
{
public:
virtual ~MatcherDescriberInterface() {}
virtual ~MatcherDescriberInterface()
{
}
// Describes this matcher to an ostream. The function should print
// a verb phrase that describes the property a value matching this
@ -130,7 +144,8 @@ class MatcherDescriberInterface {
// You are not required to override this when implementing
// MatcherInterface, but it is highly advised so that your matcher
// can produce good error messages.
virtual void DescribeNegationTo(::std::ostream* os) const {
virtual void DescribeNegationTo(::std::ostream *os) const
{
*os << "not (";
DescribeTo(os);
*os << ")";
@ -138,8 +153,8 @@ class MatcherDescriberInterface {
};
// The implementation of a matcher.
template <typename T>
class MatcherInterface : public MatcherDescriberInterface {
template <typename T> class MatcherInterface : public MatcherDescriberInterface
{
public:
// Returns true if and only if the matcher matches x; also explains the
// match result to 'listener' if necessary (see the next paragraph), in
@ -179,24 +194,33 @@ class MatcherInterface : public MatcherDescriberInterface {
// virtual void DescribeNegationTo(::std::ostream* os) const;
};
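Putting the interface together: a matcher implements MatchAndExplain, optionally streaming detail to the listener, plus the two describers. A minimal divisible-by-7 sketch in the spirit of the gMock documentation (illustrative, not part of this header):

#include <ostream>
#include "gtest/gtest.h"

class DivisibleBy7Matcher : public ::testing::MatcherInterface<int>
{
  public:
    bool MatchAndExplain(int n, ::testing::MatchResultListener *listener) const override
    {
        *listener << "the remainder is " << (n % 7);  // no-op if nobody listens
        return (n % 7) == 0;
    }
    void DescribeTo(::std::ostream *os) const override
    {
        *os << "is divisible by 7";
    }
    void DescribeNegationTo(::std::ostream *os) const override
    {
        *os << "is not divisible by 7";
    }
};

::testing::Matcher<int> DivisibleBy7()
{
    return ::testing::Matcher<int>(new DivisibleBy7Matcher);
}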
namespace internal {
namespace internal
{
// Converts a MatcherInterface<T> to a MatcherInterface<const T&>.
template <typename T>
class MatcherInterfaceAdapter : public MatcherInterface<const T&> {
template <typename T> class MatcherInterfaceAdapter : public MatcherInterface<const T &>
{
public:
explicit MatcherInterfaceAdapter(const MatcherInterface<T>* impl)
: impl_(impl) {}
~MatcherInterfaceAdapter() override { delete impl_; }
explicit MatcherInterfaceAdapter(const MatcherInterface<T> *impl) : impl_(impl)
{
}
~MatcherInterfaceAdapter() override
{
delete impl_;
}
void DescribeTo(::std::ostream* os) const override { impl_->DescribeTo(os); }
void DescribeTo(::std::ostream *os) const override
{
impl_->DescribeTo(os);
}
void DescribeNegationTo(::std::ostream* os) const override {
void DescribeNegationTo(::std::ostream *os) const override
{
impl_->DescribeNegationTo(os);
}
bool MatchAndExplain(const T& x,
MatchResultListener* listener) const override {
bool MatchAndExplain(const T &x, MatchResultListener *listener) const override
{
return impl_->MatchAndExplain(x, listener);
}
@ -206,35 +230,56 @@ class MatcherInterfaceAdapter : public MatcherInterface<const T&> {
GTEST_DISALLOW_COPY_AND_ASSIGN_(MatcherInterfaceAdapter);
};
struct AnyEq {
template <typename A, typename B>
bool operator()(const A& a, const B& b) const { return a == b; }
struct AnyEq
{
template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a == b;
}
};
struct AnyNe {
template <typename A, typename B>
bool operator()(const A& a, const B& b) const { return a != b; }
struct AnyNe
{
template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a != b;
}
};
struct AnyLt {
template <typename A, typename B>
bool operator()(const A& a, const B& b) const { return a < b; }
struct AnyLt
{
template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a < b;
}
};
struct AnyGt {
template <typename A, typename B>
bool operator()(const A& a, const B& b) const { return a > b; }
struct AnyGt
{
template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a > b;
}
};
struct AnyLe {
template <typename A, typename B>
bool operator()(const A& a, const B& b) const { return a <= b; }
struct AnyLe
{
template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a <= b;
}
};
struct AnyGe {
template <typename A, typename B>
bool operator()(const A& a, const B& b) const { return a >= b; }
struct AnyGe
{
template <typename A, typename B> bool operator()(const A &a, const B &b) const
{
return a >= b;
}
};
// A match result listener that ignores the explanation.
class DummyMatchResultListener : public MatchResultListener {
class DummyMatchResultListener : public MatchResultListener
{
public:
DummyMatchResultListener() : MatchResultListener(nullptr) {}
DummyMatchResultListener() : MatchResultListener(nullptr)
{
}
private:
GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener);
@ -243,10 +288,12 @@ class DummyMatchResultListener : public MatchResultListener {
// A match result listener that forwards the explanation to a given
// ostream. The difference between this and MatchResultListener is
// that this one is concrete.
class StreamMatchResultListener : public MatchResultListener {
class StreamMatchResultListener : public MatchResultListener
{
public:
explicit StreamMatchResultListener(::std::ostream* os)
: MatchResultListener(os) {}
explicit StreamMatchResultListener(::std::ostream *os) : MatchResultListener(os)
{
}
private:
GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener);
@ -255,31 +302,38 @@ class StreamMatchResultListener : public MatchResultListener {
// An internal class for implementing Matcher<T>, which will derive
// from it. We put functionalities common to all Matcher<T>
// specializations here to avoid code duplication.
template <typename T>
class MatcherBase {
template <typename T> class MatcherBase
{
public:
// Returns true if and only if the matcher matches x; also explains the
// match result to 'listener'.
bool MatchAndExplain(const T& x, MatchResultListener* listener) const {
bool MatchAndExplain(const T &x, MatchResultListener *listener) const
{
return impl_->MatchAndExplain(x, listener);
}
// Returns true if and only if this matcher matches x.
bool Matches(const T& x) const {
bool Matches(const T &x) const
{
DummyMatchResultListener dummy;
return MatchAndExplain(x, &dummy);
}
// Describes this matcher to an ostream.
void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); }
void DescribeTo(::std::ostream *os) const
{
impl_->DescribeTo(os);
}
// Describes the negation of this matcher to an ostream.
void DescribeNegationTo(::std::ostream* os) const {
void DescribeNegationTo(::std::ostream *os) const
{
impl_->DescribeNegationTo(os);
}
// Explains why x matches, or doesn't match, the matcher.
void ExplainMatchResultTo(const T& x, ::std::ostream* os) const {
void ExplainMatchResultTo(const T &x, ::std::ostream *os) const
{
StreamMatchResultListener listener(os);
MatchAndExplain(x, &listener);
}
@ -287,29 +341,36 @@ class MatcherBase {
// Returns the describer for this matcher object; retains ownership
// of the describer, which is only guaranteed to be alive when
// this matcher object is alive.
const MatcherDescriberInterface* GetDescriber() const {
const MatcherDescriberInterface *GetDescriber() const
{
return impl_.get();
}
protected:
MatcherBase() {}
MatcherBase()
{
}
// Constructs a matcher from its implementation.
explicit MatcherBase(const MatcherInterface<const T&>* impl) : impl_(impl) {}
explicit MatcherBase(const MatcherInterface<const T &> *impl) : impl_(impl)
{
}
template <typename U>
explicit MatcherBase(
const MatcherInterface<U>* impl,
typename std::enable_if<!std::is_same<U, const U&>::value>::type* =
nullptr)
: impl_(new internal::MatcherInterfaceAdapter<U>(impl)) {}
explicit MatcherBase(const MatcherInterface<U> *impl,
typename std::enable_if<!std::is_same<U, const U &>::value>::type * = nullptr)
: impl_(new internal::MatcherInterfaceAdapter<U>(impl))
{
}
MatcherBase(const MatcherBase &) = default;
MatcherBase &operator=(const MatcherBase &) = default;
MatcherBase(MatcherBase &&) = default;
MatcherBase &operator=(MatcherBase &&) = default;
virtual ~MatcherBase() {}
virtual ~MatcherBase()
{
}
private:
std::shared_ptr<const MatcherInterface<const T &>> impl_;
@ -321,24 +382,27 @@ class MatcherBase {
// object that can check whether a value of type T matches. The
// implementation of Matcher<T> is just a std::shared_ptr to const
// MatcherInterface<T>. Don't inherit from Matcher!
template <typename T>
class Matcher : public internal::MatcherBase<T> {
template <typename T> class Matcher : public internal::MatcherBase<T>
{
public:
// Constructs a null matcher. Needed for storing Matcher objects in STL
// containers. A default-constructed matcher is not yet initialized. You
// cannot use it until a valid value has been assigned to it.
explicit Matcher() {} // NOLINT
explicit Matcher()
{
} // NOLINT
// Constructs a matcher from its implementation.
explicit Matcher(const MatcherInterface<const T&>* impl)
: internal::MatcherBase<T>(impl) {}
explicit Matcher(const MatcherInterface<const T &> *impl) : internal::MatcherBase<T>(impl)
{
}
template <typename U>
explicit Matcher(
const MatcherInterface<U>* impl,
typename std::enable_if<!std::is_same<U, const U&>::value>::type* =
nullptr)
: internal::MatcherBase<T>(impl) {}
explicit Matcher(const MatcherInterface<U> *impl,
typename std::enable_if<!std::is_same<U, const U &>::value>::type * = nullptr)
: internal::MatcherBase<T>(impl)
{
}
// Implicit constructor here allows people to write
// EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes
@ -348,14 +412,17 @@ class Matcher : public internal::MatcherBase<T> {
// The following two specializations allow the user to write str
// instead of Eq(str) and "foo" instead of Eq("foo") when a std::string
// matcher is expected.
template <>
class GTEST_API_ Matcher<const std::string&>
: public internal::MatcherBase<const std::string&> {
template <> class GTEST_API_ Matcher<const std::string &> : public internal::MatcherBase<const std::string &>
{
public:
Matcher() {}
Matcher()
{
}
explicit Matcher(const MatcherInterface<const std::string &> *impl)
: internal::MatcherBase<const std::string&>(impl) {}
: internal::MatcherBase<const std::string &>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where
// str is a std::string object.
@ -365,16 +432,19 @@ class GTEST_API_ Matcher<const std::string&>
Matcher(const char *s); // NOLINT
};
template <>
class GTEST_API_ Matcher<std::string>
: public internal::MatcherBase<std::string> {
template <> class GTEST_API_ Matcher<std::string> : public internal::MatcherBase<std::string>
{
public:
Matcher() {}
Matcher()
{
}
explicit Matcher(const MatcherInterface<const std::string&>* impl)
: internal::MatcherBase<std::string>(impl) {}
explicit Matcher(const MatcherInterface<std::string>* impl)
: internal::MatcherBase<std::string>(impl) {}
explicit Matcher(const MatcherInterface<const std::string &> *impl) : internal::MatcherBase<std::string>(impl)
{
}
explicit Matcher(const MatcherInterface<std::string> *impl) : internal::MatcherBase<std::string>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where
// str is a string object.
@ -389,13 +459,17 @@ class GTEST_API_ Matcher<std::string>
// instead of Eq(str) and "foo" instead of Eq("foo") when a absl::string_view
// matcher is expected.
template <>
class GTEST_API_ Matcher<const absl::string_view&>
: public internal::MatcherBase<const absl::string_view&> {
class GTEST_API_ Matcher<const absl::string_view &> : public internal::MatcherBase<const absl::string_view &>
{
public:
Matcher() {}
Matcher()
{
}
explicit Matcher(const MatcherInterface<const absl::string_view &> *impl)
: internal::MatcherBase<const absl::string_view&>(impl) {}
: internal::MatcherBase<const absl::string_view &>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where
// str is a std::string object.
@ -408,16 +482,20 @@ class GTEST_API_ Matcher<const absl::string_view&>
Matcher(absl::string_view s); // NOLINT
};
template <>
class GTEST_API_ Matcher<absl::string_view>
: public internal::MatcherBase<absl::string_view> {
template <> class GTEST_API_ Matcher<absl::string_view> : public internal::MatcherBase<absl::string_view>
{
public:
Matcher() {}
Matcher()
{
}
explicit Matcher(const MatcherInterface<const absl::string_view &> *impl)
: internal::MatcherBase<absl::string_view>(impl) {}
explicit Matcher(const MatcherInterface<absl::string_view>* impl)
: internal::MatcherBase<absl::string_view>(impl) {}
: internal::MatcherBase<absl::string_view>(impl)
{
}
explicit Matcher(const MatcherInterface<absl::string_view> *impl) : internal::MatcherBase<absl::string_view>(impl)
{
}
// Allows the user to write str instead of Eq(str) sometimes, where
// str is a std::string object.
@ -432,8 +510,8 @@ class GTEST_API_ Matcher<absl::string_view>
#endif // GTEST_HAS_ABSL
// Prints a matcher in a human-readable format.
template <typename T>
std::ostream& operator<<(std::ostream& os, const Matcher<T>& matcher) {
template <typename T> std::ostream &operator<<(std::ostream &os, const Matcher<T> &matcher)
{
matcher.DescribeTo(&os);
return os;
}
@ -450,37 +528,52 @@ std::ostream& operator<<(std::ostream& os, const Matcher<T>& matcher) {
// MatchResultListener* listener) const;
//
// See the definition of NotNull() for a complete example.
template <class Impl>
class PolymorphicMatcher {
template <class Impl> class PolymorphicMatcher
{
public:
explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {}
explicit PolymorphicMatcher(const Impl &an_impl) : impl_(an_impl)
{
}
// Returns a mutable reference to the underlying matcher
// implementation object.
Impl& mutable_impl() { return impl_; }
Impl &mutable_impl()
{
return impl_;
}
// Returns an immutable reference to the underlying matcher
// implementation object.
const Impl& impl() const { return impl_; }
const Impl &impl() const
{
return impl_;
}
template <typename T>
operator Matcher<T>() const {
template <typename T> operator Matcher<T>() const
{
return Matcher<T>(new MonomorphicImpl<const T &>(impl_));
}
private:
template <typename T>
class MonomorphicImpl : public MatcherInterface<T> {
template <typename T> class MonomorphicImpl : public MatcherInterface<T>
{
public:
explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
explicit MonomorphicImpl(const Impl &impl) : impl_(impl)
{
}
virtual void DescribeTo(::std::ostream* os) const { impl_.DescribeTo(os); }
virtual void DescribeTo(::std::ostream *os) const
{
impl_.DescribeTo(os);
}
virtual void DescribeNegationTo(::std::ostream* os) const {
virtual void DescribeNegationTo(::std::ostream *os) const
{
impl_.DescribeNegationTo(os);
}
virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
virtual bool MatchAndExplain(T x, MatchResultListener *listener) const
{
return impl_.MatchAndExplain(x, listener);
}
@ -497,8 +590,8 @@ class PolymorphicMatcher {
//
// MakeMatcher may create a Matcher that accepts its argument by value, which
// leads to unnecessary copies & lack of support for non-copyable types.
template <typename T>
inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
template <typename T> inline Matcher<T> MakeMatcher(const MatcherInterface<T> *impl)
{
return Matcher<T>(impl);
}
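// Usage sketch for MakeMatcher, wrapping the hypothetical IsEvenMatcherImpl
// sketched earlier; the factory function name IsEven is also hypothetical.
inline testing::Matcher<int> IsEven()
{
    return testing::MakeMatcher(new IsEvenMatcherImpl);
}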
@ -509,12 +602,13 @@ inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
// MakePolymorphicMatcher(foo);
// vs
// PolymorphicMatcher<TypeOfFoo>(foo);
template <class Impl>
inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl& impl) {
template <class Impl> inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl &impl)
{
return PolymorphicMatcher<Impl>(impl);
}
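// Sketch of the polymorphic-matcher pattern these helpers support, in the
// spirit of the NotNull() example mentioned above; IsPositiveMatcher and
// IsPositive are hypothetical names.
class IsPositiveMatcher
{
  public:
    template <typename T> bool MatchAndExplain(const T &x, testing::MatchResultListener *) const
    {
        return x > 0;
    }
    void DescribeTo(::std::ostream *os) const
    {
        *os << "is positive";
    }
    void DescribeNegationTo(::std::ostream *os) const
    {
        *os << "isn't positive";
    }
};
inline testing::PolymorphicMatcher<IsPositiveMatcher> IsPositive()
{
    return testing::MakePolymorphicMatcher(IsPositiveMatcher());
}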
namespace internal {
namespace internal
{
// Implements a matcher that compares a given value with a
// pre-supplied value using one of the ==, <=, <, etc. operators. The
// two values being compared don't have to have the same type.
@ -525,34 +619,44 @@ namespace internal {
//
// The following template definition assumes that the Rhs parameter is
// a "bare" type (i.e. neither 'const T' nor 'T&').
template <typename D, typename Rhs, typename Op>
class ComparisonBase {
template <typename D, typename Rhs, typename Op> class ComparisonBase
{
public:
explicit ComparisonBase(const Rhs& rhs) : rhs_(rhs) {}
template <typename Lhs>
operator Matcher<Lhs>() const {
explicit ComparisonBase(const Rhs &rhs) : rhs_(rhs)
{
}
template <typename Lhs> operator Matcher<Lhs>() const
{
return Matcher<Lhs>(new Impl<const Lhs &>(rhs_));
}
private:
template <typename T>
static const T& Unwrap(const T& v) { return v; }
template <typename T>
static const T& Unwrap(std::reference_wrapper<T> v) { return v; }
template <typename T> static const T &Unwrap(const T &v)
{
return v;
}
template <typename T> static const T &Unwrap(std::reference_wrapper<T> v)
{
return v;
}
template <typename Lhs, typename = Rhs>
class Impl : public MatcherInterface<Lhs> {
template <typename Lhs, typename = Rhs> class Impl : public MatcherInterface<Lhs>
{
public:
explicit Impl(const Rhs& rhs) : rhs_(rhs) {}
bool MatchAndExplain(Lhs lhs,
MatchResultListener* /* listener */) const override {
explicit Impl(const Rhs &rhs) : rhs_(rhs)
{
}
bool MatchAndExplain(Lhs lhs, MatchResultListener * /* listener */) const override
{
return Op()(lhs, Unwrap(rhs_));
}
void DescribeTo(::std::ostream* os) const override {
void DescribeTo(::std::ostream *os) const override
{
*os << D::Desc() << " ";
UniversalPrint(Unwrap(rhs_), os);
}
void DescribeNegationTo(::std::ostream* os) const override {
void DescribeNegationTo(::std::ostream *os) const override
{
*os << D::NegatedDesc() << " ";
UniversalPrint(Unwrap(rhs_), os);
}
@ -563,66 +667,110 @@ class ComparisonBase {
Rhs rhs_;
};
template <typename Rhs>
class EqMatcher : public ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq> {
template <typename Rhs> class EqMatcher : public ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>
{
public:
explicit EqMatcher(const Rhs& rhs)
: ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>(rhs) { }
static const char* Desc() { return "is equal to"; }
static const char* NegatedDesc() { return "isn't equal to"; }
explicit EqMatcher(const Rhs &rhs) : ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>(rhs)
{
}
static const char *Desc()
{
return "is equal to";
}
static const char *NegatedDesc()
{
return "isn't equal to";
}
};
template <typename Rhs>
class NeMatcher : public ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe> {
template <typename Rhs> class NeMatcher : public ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>
{
public:
explicit NeMatcher(const Rhs& rhs)
: ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>(rhs) { }
static const char* Desc() { return "isn't equal to"; }
static const char* NegatedDesc() { return "is equal to"; }
explicit NeMatcher(const Rhs &rhs) : ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>(rhs)
{
}
static const char *Desc()
{
return "isn't equal to";
}
static const char *NegatedDesc()
{
return "is equal to";
}
};
template <typename Rhs>
class LtMatcher : public ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt> {
template <typename Rhs> class LtMatcher : public ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>
{
public:
explicit LtMatcher(const Rhs& rhs)
: ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>(rhs) { }
static const char* Desc() { return "is <"; }
static const char* NegatedDesc() { return "isn't <"; }
explicit LtMatcher(const Rhs &rhs) : ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>(rhs)
{
}
static const char *Desc()
{
return "is <";
}
static const char *NegatedDesc()
{
return "isn't <";
}
};
template <typename Rhs>
class GtMatcher : public ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt> {
template <typename Rhs> class GtMatcher : public ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>
{
public:
explicit GtMatcher(const Rhs& rhs)
: ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>(rhs) { }
static const char* Desc() { return "is >"; }
static const char* NegatedDesc() { return "isn't >"; }
explicit GtMatcher(const Rhs &rhs) : ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>(rhs)
{
}
static const char *Desc()
{
return "is >";
}
static const char *NegatedDesc()
{
return "isn't >";
}
};
template <typename Rhs>
class LeMatcher : public ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe> {
template <typename Rhs> class LeMatcher : public ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>
{
public:
explicit LeMatcher(const Rhs& rhs)
: ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>(rhs) { }
static const char* Desc() { return "is <="; }
static const char* NegatedDesc() { return "isn't <="; }
explicit LeMatcher(const Rhs &rhs) : ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>(rhs)
{
}
static const char *Desc()
{
return "is <=";
}
static const char *NegatedDesc()
{
return "isn't <=";
}
};
template <typename Rhs>
class GeMatcher : public ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe> {
template <typename Rhs> class GeMatcher : public ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>
{
public:
explicit GeMatcher(const Rhs& rhs)
: ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>(rhs) { }
static const char* Desc() { return "is >="; }
static const char* NegatedDesc() { return "isn't >="; }
explicit GeMatcher(const Rhs &rhs) : ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>(rhs)
{
}
static const char *Desc()
{
return "is >=";
}
static const char *NegatedDesc()
{
return "isn't >=";
}
};
// Implements polymorphic matchers MatchesRegex(regex) and
// ContainsRegex(regex), which can be used as a Matcher<T> as long as
// T can be converted to a string.
class MatchesRegexMatcher {
class MatchesRegexMatcher
{
public:
MatchesRegexMatcher(const RE* regex, bool full_match)
: regex_(regex), full_match_(full_match) {}
MatchesRegexMatcher(const RE *regex, bool full_match) : regex_(regex), full_match_(full_match)
{
}
#if GTEST_HAS_ABSL
bool MatchAndExplain(const absl::string_view& s,
MatchResultListener* listener) const {
bool MatchAndExplain(const absl::string_view &s, MatchResultListener *listener) const
{
return MatchAndExplain(std::string(s), listener);
}
#endif // GTEST_HAS_ABSL
@ -632,8 +780,8 @@ class MatchesRegexMatcher {
// char*
// const wchar_t*
// wchar_t*
template <typename CharType>
bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
template <typename CharType> bool MatchAndExplain(CharType *s, MatchResultListener *listener) const
{
return s != nullptr && MatchAndExplain(std::string(s), listener);
}
@ -642,21 +790,21 @@ class MatchesRegexMatcher {
// This is a template, not just a plain function with const std::string&,
// because absl::string_view has some interfering non-explicit constructors.
template <class MatcheeStringType>
bool MatchAndExplain(const MatcheeStringType& s,
MatchResultListener* /* listener */) const {
bool MatchAndExplain(const MatcheeStringType &s, MatchResultListener * /* listener */) const
{
const std::string &s2(s);
return full_match_ ? RE::FullMatch(s2, *regex_)
: RE::PartialMatch(s2, *regex_);
return full_match_ ? RE::FullMatch(s2, *regex_) : RE::PartialMatch(s2, *regex_);
}
void DescribeTo(::std::ostream* os) const {
void DescribeTo(::std::ostream *os) const
{
*os << (full_match_ ? "matches" : "contains") << " regular expression ";
UniversalPrinter<std::string>::Print(regex_->pattern(), os);
}
void DescribeNegationTo(::std::ostream* os) const {
*os << "doesn't " << (full_match_ ? "match" : "contain")
<< " regular expression ";
void DescribeNegationTo(::std::ostream *os) const
{
*os << "doesn't " << (full_match_ ? "match" : "contain") << " regular expression ";
UniversalPrinter<std::string>::Print(regex_->pattern(), os);
}
@ -668,36 +816,40 @@ class MatchesRegexMatcher {
// Matches a string that fully matches regular expression 'regex'.
// The matcher takes ownership of 'regex'.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
const internal::RE* regex) {
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(const internal::RE *regex)
{
return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true));
}
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
const std::string& regex) {
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(const std::string &regex)
{
return MatchesRegex(new internal::RE(regex));
}
// Matches a string that contains regular expression 'regex'.
// The matcher takes ownership of 'regex'.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
const internal::RE* regex) {
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(const internal::RE *regex)
{
return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false));
}
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
const std::string& regex) {
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(const std::string &regex)
{
return ContainsRegex(new internal::RE(regex));
}
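// Usage sketch, assuming EXPECT_THAT from gmock/gmock.h; the test name is
// hypothetical. MatchesRegex must match the whole string, while
// ContainsRegex matches any substring.
TEST(RegexMatcherSketch, Works)
{
    EXPECT_THAT("hello world", testing::ContainsRegex("wor.d"));
    EXPECT_THAT("gtest", testing::MatchesRegex("g.*t"));
}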
// Creates a polymorphic matcher that matches anything equal to x.
// Note: if the parameter of Eq() were declared as const T&, Eq("foo")
// wouldn't compile.
template <typename T>
inline internal::EqMatcher<T> Eq(T x) { return internal::EqMatcher<T>(x); }
template <typename T> inline internal::EqMatcher<T> Eq(T x)
{
return internal::EqMatcher<T>(x);
}
// Constructs a Matcher<T> from a 'value' of type T. The constructed
// matcher matches any value that's equal to 'value'.
template <typename T>
Matcher<T>::Matcher(T value) { *this = Eq(value); }
template <typename T> Matcher<T>::Matcher(T value)
{
*this = Eq(value);
}
// Creates a monomorphic matcher that matches anything with type Lhs
// and equal to rhs. A user may need to use this instead of Eq(...)
@ -711,36 +863,38 @@ Matcher<T>::Matcher(T value) { *this = Eq(value); }
// it yet as those are used much less than Eq() in practice. A user
// can always write Matcher<T>(Lt(5)) to be explicit about the type,
// for example.
template <typename Lhs, typename Rhs>
inline Matcher<Lhs> TypedEq(const Rhs& rhs) { return Eq(rhs); }
template <typename Lhs, typename Rhs> inline Matcher<Lhs> TypedEq(const Rhs &rhs)
{
return Eq(rhs);
}
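// Sketch: TypedEq pins the argument type where a bare Eq() would be
// ambiguous, e.g. for a mock method overloaded on char and int; the function
// name is hypothetical.
inline void TypedEqSketch()
{
    testing::Matcher<char> as_char = testing::TypedEq<char>('a');
    testing::Matcher<int> as_int = testing::TypedEq<int>(97);
    (void)as_char;
    (void)as_int;
}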
// Creates a polymorphic matcher that matches anything >= x.
template <typename Rhs>
inline internal::GeMatcher<Rhs> Ge(Rhs x) {
template <typename Rhs> inline internal::GeMatcher<Rhs> Ge(Rhs x)
{
return internal::GeMatcher<Rhs>(x);
}
// Creates a polymorphic matcher that matches anything > x.
template <typename Rhs>
inline internal::GtMatcher<Rhs> Gt(Rhs x) {
template <typename Rhs> inline internal::GtMatcher<Rhs> Gt(Rhs x)
{
return internal::GtMatcher<Rhs>(x);
}
// Creates a polymorphic matcher that matches anything <= x.
template <typename Rhs>
inline internal::LeMatcher<Rhs> Le(Rhs x) {
template <typename Rhs> inline internal::LeMatcher<Rhs> Le(Rhs x)
{
return internal::LeMatcher<Rhs>(x);
}
// Creates a polymorphic matcher that matches anything < x.
template <typename Rhs>
inline internal::LtMatcher<Rhs> Lt(Rhs x) {
template <typename Rhs> inline internal::LtMatcher<Rhs> Lt(Rhs x)
{
return internal::LtMatcher<Rhs>(x);
}
// Creates a polymorphic matcher that matches anything != x.
template <typename Rhs>
inline internal::NeMatcher<Rhs> Ne(Rhs x) {
template <typename Rhs> inline internal::NeMatcher<Rhs> Ne(Rhs x)
{
return internal::NeMatcher<Rhs>(x);
}
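// Usage sketch for the creators above (EXPECT_THAT is from gmock/gmock.h;
// the value is hypothetical):
TEST(ComparisonMatcherSketch, Works)
{
    const int n = 5;
    EXPECT_THAT(n, testing::Gt(3));
    EXPECT_THAT(n, testing::Le(5));
    EXPECT_THAT(n, testing::Ne(7));
}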
} // namespace testing


@ -52,14 +52,14 @@
#include "gtest/internal/gtest-port.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
/* class A needs to have dll-interface to be used by clients of class B */)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
// Ensures that there is at least one operator<< in the global namespace.
// See Message& operator<<(...) below for why.
void operator<<(const testing::internal::Secret &, int);
namespace testing {
namespace testing
{
// The Message class works like an ostream repeater.
//
@ -87,7 +87,8 @@ namespace testing {
// latter (it causes an access violation if you do). The Message
// class hides this difference by treating a NULL char pointer as
// "(null)".
class GTEST_API_ Message {
class GTEST_API_ Message
{
private:
// The type of basic IO manipulators (endl, ends, and flush) for
// narrow streams.
@ -98,18 +99,20 @@ class GTEST_API_ Message {
Message();
// Copy constructor.
Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT
Message(const Message &msg) : ss_(new ::std::stringstream)
{ // NOLINT
*ss_ << msg.GetString();
}
// Constructs a Message from a C-string.
explicit Message(const char* str) : ss_(new ::std::stringstream) {
explicit Message(const char *str) : ss_(new ::std::stringstream)
{
*ss_ << str;
}
// Streams a non-pointer value to this object.
template <typename T>
inline Message& operator <<(const T& val) {
template <typename T> inline Message &operator<<(const T &val)
{
// Some libraries overload << for STL containers. These
// overloads are defined in the global namespace instead of ::std.
//
@ -142,11 +145,14 @@ class GTEST_API_ Message {
// may get "0", "(nil)", "(null)", or an access violation. To
// ensure consistent results across compilers, we always treat NULL
// as "(null)".
template <typename T>
inline Message& operator <<(T* const& pointer) { // NOLINT
if (pointer == nullptr) {
template <typename T> inline Message &operator<<(T *const &pointer)
{ // NOLINT
if (pointer == nullptr)
{
*ss_ << "(null)";
} else {
}
else
{
*ss_ << pointer;
}
return *this;
@ -158,13 +164,15 @@ class GTEST_API_ Message {
// templatized version above. Without this definition, streaming
// endl or other basic IO manipulators to Message will confuse the
// compiler.
Message& operator <<(BasicNarrowIoManip val) {
Message &operator<<(BasicNarrowIoManip val)
{
*ss_ << val;
return *this;
}
// Instead of 1/0, we want to see true/false for bool values.
Message& operator <<(bool b) {
Message &operator<<(bool b)
{
return *this << (b ? "true" : "false");
}
@ -195,18 +203,20 @@ class GTEST_API_ Message {
};
// Streams a Message to an ostream.
inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
inline std::ostream &operator<<(std::ostream &os, const Message &sb)
{
return os << sb.GetString();
}
namespace internal {
namespace internal
{
// Converts a streamable value to an std::string. A NULL pointer is
// converted to "(null)". When the input value is a ::string,
// ::std::string, ::wstring, or ::std::wstring object, each NUL
// character in it is replaced with "\\0".
template <typename T>
std::string StreamableToString(const T& streamable) {
template <typename T> std::string StreamableToString(const T &streamable)
{
return (Message() << streamable).GetString();
}
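// Sketch of the two facilities above; StreamableToString lives in
// testing::internal and is shown for illustration only, and the function
// name below is hypothetical.
inline void MessageSketch()
{
    testing::Message msg;
    msg << "x = " << 42 << ", flag = " << true;  // bool streams as true/false
    const std::string a = msg.GetString();       // "x = 42, flag = true"
    const std::string b = testing::internal::StreamableToString(3.5);  // "3.5"
    (void)a;
    (void)b;
}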


@ -36,7 +36,6 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
// Value-parameterized tests allow you to test your code with different
// parameters without writing multiple copies of the same test.
//
@ -181,7 +180,8 @@ TEST_P(DerivedTest, DoesBlah) {
#include "gtest/internal/gtest-param-util.h"
#include "gtest/internal/gtest-port.h"
namespace testing {
namespace testing
{
// Functions producing parameter generators.
//
@ -225,14 +225,13 @@ namespace testing {
// * Condition start < end must be satisfied in order for resulting sequences
// to contain any elements.
//
template <typename T, typename IncrementT>
internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
return internal::ParamGenerator<T>(
new internal::RangeGenerator<T, IncrementT>(start, end, step));
template <typename T, typename IncrementT> internal::ParamGenerator<T> Range(T start, T end, IncrementT step)
{
return internal::ParamGenerator<T>(new internal::RangeGenerator<T, IncrementT>(start, end, step));
}
template <typename T>
internal::ParamGenerator<T> Range(T start, T end) {
template <typename T> internal::ParamGenerator<T> Range(T start, T end)
{
return Range(start, end, 1);
}
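// Sketch: the end value is excluded, so Range(0, 10, 3) yields 0, 3, 6, 9 and
// Range(0, 3) yields 0, 1, 2. ParamGenerator is an internal type; holding it
// directly here is for illustration only.
inline void RangeSketch()
{
    testing::internal::ParamGenerator<int> stepped = testing::Range(0, 10, 3);
    (void)stepped;
}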
@ -292,22 +291,20 @@ internal::ParamGenerator<T> Range(T start, T end) {
// ValuesIn(l.begin(), l.end()));
//
template <typename ForwardIterator>
internal::ParamGenerator<
typename std::iterator_traits<ForwardIterator>::value_type>
ValuesIn(ForwardIterator begin, ForwardIterator end) {
internal::ParamGenerator<typename std::iterator_traits<ForwardIterator>::value_type> ValuesIn(ForwardIterator begin,
ForwardIterator end)
{
typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType;
return internal::ParamGenerator<ParamType>(
new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
return internal::ParamGenerator<ParamType>(new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
}
template <typename T, size_t N>
internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
template <typename T, size_t N> internal::ParamGenerator<T> ValuesIn(const T (&array)[N])
{
return ValuesIn(array, array + N);
}
template <class Container>
internal::ParamGenerator<typename Container::value_type> ValuesIn(
const Container& container) {
template <class Container> internal::ParamGenerator<typename Container::value_type> ValuesIn(const Container &container)
{
return ValuesIn(container.begin(), container.end());
}
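// Usage sketch for ValuesIn with a container (all names are hypothetical);
// see also the fuller TEST_P sketch after the macro definitions below.
inline void ValuesInSketch()
{
    const std::vector<std::string> inputs = {"a", "b", "c"};
    testing::internal::ParamGenerator<std::string> gen = testing::ValuesIn(inputs);
    (void)gen;  // yields "a", "b", "c"
}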
@ -331,8 +328,8 @@ internal::ParamGenerator<typename Container::value_type> ValuesIn(
// INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
//
//
template <typename... T>
internal::ValueArray<T...> Values(T... v) {
template <typename... T> internal::ValueArray<T...> Values(T... v)
{
return internal::ValueArray<T...>(std::move(v)...);
}
@ -356,7 +353,8 @@ internal::ValueArray<T...> Values(T... v) {
// }
// INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool());
//
inline internal::ParamGenerator<bool> Bool() {
inline internal::ParamGenerator<bool> Bool()
{
return Values(false, true);
}
@ -406,37 +404,36 @@ inline internal::ParamGenerator<bool> Bool() {
// INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest,
// Combine(Bool(), Bool()));
//
template <typename... Generator>
internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {
template <typename... Generator> internal::CartesianProductHolder<Generator...> Combine(const Generator &...g)
{
return internal::CartesianProductHolder<Generator...>(g...);
}
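// Sketch: Combine(Bool(), Values(1, 2)) produces the four tuples (false, 1),
// (false, 2), (true, 1), (true, 2); the holder converts to a generator of
// std::tuple<bool, int>. Internal types are named here for illustration only.
inline void CombineSketch()
{
    testing::internal::ParamGenerator<std::tuple<bool, int>> gen =
        testing::Combine(testing::Bool(), testing::Values(1, 2));
    (void)gen;
}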
#define TEST_P(test_suite_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \
: public test_suite_name { \
class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) : public test_suite_name \
{ \
public: \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {} \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() \
{ \
} \
virtual void TestBody(); \
\
private: \
static int AddToRegistry() { \
static int AddToRegistry() \
{ \
::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>( \
#test_suite_name, \
.GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestPattern( \
GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \
new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_( \
test_suite_name, test_name)>()); \
new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \
test_name)); \
GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_suite_name, \
test_name)::gtest_registering_dummy_ = \
int GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
@ -459,42 +456,35 @@ internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {
#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \
static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
gtest_##prefix##test_suite_name##_EvalGenerator_() { \
gtest_##prefix##test_suite_name##_EvalGenerator_() \
{ \
return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \
} \
static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \
const ::testing::TestParamInfo<test_suite_name::ParamType>& info) { \
if (::testing::internal::AlwaysFalse()) { \
const ::testing::TestParamInfo<test_suite_name::ParamType> &info) \
{ \
if (::testing::internal::AlwaysFalse()) \
{ \
::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, \
::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
DUMMY_PARAM_))); \
__VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))); \
auto t = std::make_tuple(__VA_ARGS__); \
static_assert(std::tuple_size<decltype(t)>::value <= 2, \
"Too Many Args!"); \
static_assert(std::tuple_size<decltype(t)>::value <= 2, "Too Many Args!"); \
} \
return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, \
::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
DUMMY_PARAM_))))(info); \
__VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))))(info); \
} \
static int gtest_##prefix##test_suite_name##_dummy_ \
GTEST_ATTRIBUTE_UNUSED_ = \
static int gtest_##prefix##test_suite_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>( \
#test_suite_name, \
.GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestSuiteInstantiation( \
#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \
&gtest_##prefix##test_suite_name##_EvalGenerateName_, \
__FILE__, __LINE__)
->AddTestSuiteInstantiation(#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \
&gtest_##prefix##test_suite_name##_EvalGenerateName_, __FILE__, __LINE__)
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TEST_CASE_P \
static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), \
""); \
static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), ""); \
INSTANTIATE_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
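// End-to-end sketch of TEST_P and INSTANTIATE_TEST_SUITE_P (FooTest and the
// parameter values are hypothetical; this belongs in a test source file):
class FooTest : public testing::TestWithParam<int>
{
};
TEST_P(FooTest, IsNonNegative)
{
    EXPECT_GE(GetParam(), 0);
}
INSTANTIATE_TEST_SUITE_P(SmallValues, FooTest, testing::Values(0, 1, 2));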


@ -27,7 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Google Test - The Google C++ Testing and Mocking Framework
//
// This file implements a universal value printer that can print a
@ -100,6 +99,8 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-port.h"
#include <functional>
#include <ostream> // NOLINT
#include <sstream>
@ -108,8 +109,6 @@
#include <type_traits>
#include <utility>
#include <vector>
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-port.h"
#if GTEST_HAS_ABSL
#include "absl/strings/string_view.h"
@ -117,21 +116,22 @@
#include "absl/types/variant.h"
#endif // GTEST_HAS_ABSL
namespace testing {
namespace testing
{
// Definitions in the 'internal' and 'internal2' namespaces are
// subject to change without notice. DO NOT USE THEM IN USER CODE!
namespace internal2 {
namespace internal2
{
// Prints the given number of bytes in the given object to the given
// ostream.
GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
size_t count,
::std::ostream* os);
GTEST_API_ void PrintBytesInObjectTo(const unsigned char *obj_bytes, size_t count, ::std::ostream *os);
// For selecting which printer to use when a given type has neither <<
// nor PrintTo().
enum TypeKind {
enum TypeKind
{
kProtobuf, // a protobuf type
kConvertibleToInteger, // a type implicitly convertible to BiggestInt
// (e.g. a named or unnamed enum type)
@ -146,14 +146,13 @@ enum TypeKind {
// by the universal printer to print a value of type T when neither
// operator<< nor PrintTo() is defined for T, where kTypeKind is the
// "kind" of T as defined by enum TypeKind.
template <typename T, TypeKind kTypeKind>
class TypeWithoutFormatter {
template <typename T, TypeKind kTypeKind> class TypeWithoutFormatter
{
public:
// This default version is called when kTypeKind is kOtherType.
static void PrintValue(const T& value, ::std::ostream* os) {
PrintBytesInObjectTo(
static_cast<const unsigned char*>(
reinterpret_cast<const void*>(std::addressof(value))),
static void PrintValue(const T &value, ::std::ostream *os)
{
PrintBytesInObjectTo(static_cast<const unsigned char *>(reinterpret_cast<const void *>(std::addressof(value))),
sizeof(value), os);
}
};
@ -163,20 +162,22 @@ class TypeWithoutFormatter {
// DebugString() for better readability.
const size_t kProtobufOneLinerMaxLength = 50;
template <typename T>
class TypeWithoutFormatter<T, kProtobuf> {
template <typename T> class TypeWithoutFormatter<T, kProtobuf>
{
public:
static void PrintValue(const T& value, ::std::ostream* os) {
static void PrintValue(const T &value, ::std::ostream *os)
{
std::string pretty_str = value.ShortDebugString();
if (pretty_str.length() > kProtobufOneLinerMaxLength) {
if (pretty_str.length() > kProtobufOneLinerMaxLength)
{
pretty_str = "\n" + value.DebugString();
}
*os << ("<" + pretty_str + ">");
}
};
template <typename T>
class TypeWithoutFormatter<T, kConvertibleToInteger> {
template <typename T> class TypeWithoutFormatter<T, kConvertibleToInteger>
{
public:
// Since T has no << operator or PrintTo() but can be implicitly
// converted to BiggestInt, we print it as a BiggestInt.
@ -185,15 +186,16 @@ class TypeWithoutFormatter<T, kConvertibleToInteger> {
// case printing it as an integer is the desired behavior. In case
// T is not an enum, printing it as an integer is the best we can do
// given that it has no user-defined printer.
static void PrintValue(const T& value, ::std::ostream* os) {
static void PrintValue(const T &value, ::std::ostream *os)
{
const internal::BiggestInt kBigInt = value;
*os << kBigInt;
}
};
#if GTEST_HAS_ABSL
template <typename T>
class TypeWithoutFormatter<T, kConvertibleToStringView> {
template <typename T> class TypeWithoutFormatter<T, kConvertibleToStringView>
{
public:
// Since T has neither operator<< nor PrintTo() but can be implicitly
// converted to absl::string_view, we print it as an absl::string_view.
@ -229,18 +231,13 @@ class TypeWithoutFormatter<T, kConvertibleToStringView> {
// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more
// specific.
template <typename Char, typename CharTraits, typename T>
::std::basic_ostream<Char, CharTraits>& operator<<(
::std::basic_ostream<Char, CharTraits>& os, const T& x) {
TypeWithoutFormatter<T, (internal::IsAProtocolMessage<T>::value
? kProtobuf
: std::is_convertible<
const T&, internal::BiggestInt>::value
? kConvertibleToInteger
::std::basic_ostream<Char, CharTraits> &operator<<(::std::basic_ostream<Char, CharTraits> &os, const T &x)
{
TypeWithoutFormatter<T, (internal::IsAProtocolMessage<T>::value ? kProtobuf
: std::is_convertible<const T &, internal::BiggestInt>::value ? kConvertibleToInteger
:
#if GTEST_HAS_ABSL
std::is_convertible<
const T&, absl::string_view>::value
? kConvertibleToStringView
std::is_convertible<const T &, absl::string_view>::value ? kConvertibleToStringView
:
#endif
kOtherType)>::PrintValue(x, &os);
@ -252,12 +249,13 @@ template <typename Char, typename CharTraits, typename T>
// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
// magic needed for implementing UniversalPrinter won't work.
namespace testing_internal {
namespace testing_internal
{
// Used to print a value that is not an STL-style container when the
// user doesn't define PrintTo() for it.
template <typename T>
void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
template <typename T> void DefaultPrintNonContainerTo(const T &value, ::std::ostream *os)
{
// With the following statement, during unqualified name lookup,
// testing::internal2::operator<< appears as if it was declared in
// the nearest enclosing namespace that contains both
@ -289,8 +287,10 @@ void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
} // namespace testing_internal
namespace testing {
namespace internal {
namespace testing
{
namespace internal
{
// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
// value of type ToPrint that is an operand of a comparison assertion
@ -307,19 +307,21 @@ namespace internal {
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
// The default case.
template <typename ToPrint, typename OtherOperand>
class FormatForComparison {
template <typename ToPrint, typename OtherOperand> class FormatForComparison
{
public:
static ::std::string Format(const ToPrint& value) {
static ::std::string Format(const ToPrint &value)
{
return ::testing::PrintToString(value);
}
};
// Array.
template <typename ToPrint, size_t N, typename OtherOperand>
class FormatForComparison<ToPrint[N], OtherOperand> {
template <typename ToPrint, size_t N, typename OtherOperand> class FormatForComparison<ToPrint[N], OtherOperand>
{
public:
static ::std::string Format(const ToPrint* value) {
static ::std::string Format(const ToPrint *value)
{
return FormatForComparison<const ToPrint *, OtherOperand>::Format(value);
}
};
@ -328,10 +330,11 @@ class FormatForComparison<ToPrint[N], OtherOperand> {
// whether they actually point to a NUL-terminated string.
#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
template <typename OtherOperand> \
class FormatForComparison<CharType*, OtherOperand> { \
template <typename OtherOperand> class FormatForComparison<CharType *, OtherOperand> \
{ \
public: \
static ::std::string Format(CharType* value) { \
static ::std::string Format(CharType *value) \
{ \
return ::testing::PrintToString(static_cast<const void *>(value)); \
} \
}
@ -347,10 +350,11 @@ GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
// to point to a NUL-terminated string, and thus can print it as a string.
#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
template <> \
class FormatForComparison<CharType*, OtherStringType> { \
template <> class FormatForComparison<CharType *, OtherStringType> \
{ \
public: \
static ::std::string Format(CharType* value) { \
static ::std::string Format(CharType *value) \
{ \
return ::testing::PrintToString(value); \
} \
}
@ -374,8 +378,8 @@ GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
template <typename T1, typename T2>
std::string FormatForComparisonFailureMessage(
const T1& value, const T2& /* other_operand */) {
std::string FormatForComparisonFailureMessage(const T1 &value, const T2 & /* other_operand */)
{
return FormatForComparison<T1, T2>::Format(value);
}
@ -386,33 +390,36 @@ std::string FormatForComparisonFailureMessage(
// We define UniversalPrinter as a class template (as opposed to a
// function template), as we need to partially specialize it for
// reference types, which cannot be done with function templates.
template <typename T>
class UniversalPrinter;
template <typename T> class UniversalPrinter;
template <typename T>
void UniversalPrint(const T& value, ::std::ostream* os);
template <typename T> void UniversalPrint(const T &value, ::std::ostream *os);
enum DefaultPrinterType {
enum DefaultPrinterType
{
kPrintContainer,
kPrintPointer,
kPrintFunctionPointer,
kPrintOther,
};
template <DefaultPrinterType type> struct WrapPrinterType {};
template <DefaultPrinterType type> struct WrapPrinterType
{
};
// Used to print an STL-style container when the user doesn't define
// a PrintTo() for it.
template <typename C>
void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */,
const C& container, ::std::ostream* os) {
void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */, const C &container, ::std::ostream *os)
{
const size_t kMaxCount = 32; // The maximum number of elements to print.
*os << '{';
size_t count = 0;
for (typename C::const_iterator it = container.begin();
it != container.end(); ++it, ++count) {
if (count > 0) {
for (typename C::const_iterator it = container.begin(); it != container.end(); ++it, ++count)
{
if (count > 0)
{
*os << ',';
if (count == kMaxCount) { // Enough has been printed.
if (count == kMaxCount)
{ // Enough has been printed.
*os << " ...";
break;
}
@ -423,7 +430,8 @@ void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */,
internal::UniversalPrint(*it, os);
}
if (count > 0) {
if (count > 0)
{
*os << ' ';
}
*os << '}';
@ -435,24 +443,28 @@ void DefaultPrintTo(WrapPrinterType<kPrintContainer> /* dummy */,
// a location in the address space. Their representation is
// implementation-defined. Therefore they will be printed as raw
// bytes.)
template <typename T>
void DefaultPrintTo(WrapPrinterType<kPrintPointer> /* dummy */,
T* p, ::std::ostream* os) {
if (p == nullptr) {
template <typename T> void DefaultPrintTo(WrapPrinterType<kPrintPointer> /* dummy */, T *p, ::std::ostream *os)
{
if (p == nullptr)
{
*os << "NULL";
} else {
}
else
{
// T is not a function type. We just call << to print p,
// relying on ADL to pick up user-defined << for their pointer
// types, if any.
*os << p;
}
}
template <typename T>
void DefaultPrintTo(WrapPrinterType<kPrintFunctionPointer> /* dummy */,
T* p, ::std::ostream* os) {
if (p == nullptr) {
template <typename T> void DefaultPrintTo(WrapPrinterType<kPrintFunctionPointer> /* dummy */, T *p, ::std::ostream *os)
{
if (p == nullptr)
{
*os << "NULL";
} else {
}
else
{
// T is a function type, so '*os << p' doesn't do what we want
// (it just prints p as bool). We want to print p as a const
// void*.
@ -462,9 +474,8 @@ void DefaultPrintTo(WrapPrinterType<kPrintFunctionPointer> /* dummy */,
// Used to print a non-container, non-pointer value when the user
// doesn't define PrintTo() for it.
template <typename T>
void DefaultPrintTo(WrapPrinterType<kPrintOther> /* dummy */,
const T& value, ::std::ostream* os) {
template <typename T> void DefaultPrintTo(WrapPrinterType<kPrintOther> /* dummy */, const T &value, ::std::ostream *os)
{
::testing_internal::DefaultPrintNonContainerTo(value, os);
}
@ -479,8 +490,8 @@ void DefaultPrintTo(WrapPrinterType<kPrintOther> /* dummy */,
// Foo is not desirable (e.g. the coding style may prevent doing it,
// or there is already a << operator but it doesn't do what the user
// wants).
template <typename T>
void PrintTo(const T& value, ::std::ostream* os) {
template <typename T> void PrintTo(const T &value, ::std::ostream *os)
{
// DefaultPrintTo() is overloaded. The type of its first argument
// determines which version will be picked.
//
@ -500,15 +511,11 @@ void PrintTo(const T& value, ::std::ostream* os) {
// cause this warning, and use a separate overload of DefaultPrintTo for
// function pointers so that the `*os << p` in the object pointer overload
// doesn't cause that warning either.
DefaultPrintTo(
WrapPrinterType <
(sizeof(IsContainerTest<T>(0)) == sizeof(IsContainer)) &&
DefaultPrintTo(WrapPrinterType < (sizeof(IsContainerTest<T>(0)) == sizeof(IsContainer)) &&
!IsRecursiveContainer<T>::value
? kPrintContainer
: !std::is_pointer<T>::value
? kPrintOther
: std::is_function<typename std::remove_pointer<T>::type>::value
? kPrintFunctionPointer
: !std::is_pointer<T>::value ? kPrintOther
: std::is_function<typename std::remove_pointer<T>::type>::value ? kPrintFunctionPointer
: kPrintPointer > (),
value, os);
}
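// Sketch: a user type opts into pretty-printing by defining PrintTo in its
// own namespace, where argument-dependent lookup finds it (bar::Bar is a
// hypothetical type):
namespace bar
{
struct Bar
{
    int x;
};
inline void PrintTo(const Bar &b, ::std::ostream *os)
{
    *os << "Bar{" << b.x << "}";
}
} // namespace bar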
@ -520,7 +527,8 @@ void PrintTo(const T& value, ::std::ostream* os) {
// Overloads for various char types.
GTEST_API_ void PrintTo(unsigned char c, ::std::ostream *os);
GTEST_API_ void PrintTo(signed char c, ::std::ostream *os);
inline void PrintTo(char c, ::std::ostream* os) {
inline void PrintTo(char c, ::std::ostream *os)
{
// When printing a plain char, we always treat it as unsigned. This
// way, the output won't be affected by whether the compiler thinks
// char is signed or not.
@ -528,7 +536,8 @@ inline void PrintTo(char c, ::std::ostream* os) {
}
// Overloads for other simple built-in types.
inline void PrintTo(bool x, ::std::ostream* os) {
inline void PrintTo(bool x, ::std::ostream *os)
{
*os << (x ? "true" : "false");
}
@ -543,22 +552,27 @@ GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
// Overloads for C strings.
GTEST_API_ void PrintTo(const char *s, ::std::ostream *os);
inline void PrintTo(char* s, ::std::ostream* os) {
inline void PrintTo(char *s, ::std::ostream *os)
{
PrintTo(ImplicitCast_<const char *>(s), os);
}
// signed/unsigned char is often used for representing binary data, so
// we print pointers to it as void* to be safe.
inline void PrintTo(const signed char* s, ::std::ostream* os) {
inline void PrintTo(const signed char *s, ::std::ostream *os)
{
PrintTo(ImplicitCast_<const void *>(s), os);
}
inline void PrintTo(signed char* s, ::std::ostream* os) {
inline void PrintTo(signed char *s, ::std::ostream *os)
{
PrintTo(ImplicitCast_<const void *>(s), os);
}
inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
inline void PrintTo(const unsigned char *s, ::std::ostream *os)
{
PrintTo(ImplicitCast_<const void *>(s), os);
}
inline void PrintTo(unsigned char* s, ::std::ostream* os) {
inline void PrintTo(unsigned char *s, ::std::ostream *os)
{
PrintTo(ImplicitCast_<const void *>(s), os);
}
@ -570,7 +584,8 @@ inline void PrintTo(unsigned char* s, ::std::ostream* os) {
#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
// Overloads for wide C strings
GTEST_API_ void PrintTo(const wchar_t *s, ::std::ostream *os);
inline void PrintTo(wchar_t* s, ::std::ostream* os) {
inline void PrintTo(wchar_t *s, ::std::ostream *os)
{
PrintTo(ImplicitCast_<const wchar_t *>(s), os);
}
#endif
@ -580,10 +595,11 @@ inline void PrintTo(wchar_t* s, ::std::ostream* os) {
// Prints the given number of elements in an array, without printing
// the curly braces.
template <typename T>
void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
template <typename T> void PrintRawArrayTo(const T a[], size_t count, ::std::ostream *os)
{
UniversalPrint(a[0], os);
for (size_t i = 1; i != count; i++) {
for (size_t i = 1; i != count; i++)
{
*os << ", ";
UniversalPrint(a[i], os);
}
@ -591,61 +607,66 @@ void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
// Overloads for ::std::string.
GTEST_API_ void PrintStringTo(const ::std::string &s, ::std::ostream *os);
inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
inline void PrintTo(const ::std::string &s, ::std::ostream *os)
{
PrintStringTo(s, os);
}
// Overloads for ::std::wstring.
#if GTEST_HAS_STD_WSTRING
GTEST_API_ void PrintWideStringTo(const ::std::wstring &s, ::std::ostream *os);
inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
inline void PrintTo(const ::std::wstring &s, ::std::ostream *os)
{
PrintWideStringTo(s, os);
}
#endif // GTEST_HAS_STD_WSTRING
#if GTEST_HAS_ABSL
// Overload for absl::string_view.
inline void PrintTo(absl::string_view sp, ::std::ostream* os) {
inline void PrintTo(absl::string_view sp, ::std::ostream *os)
{
PrintTo(::std::string(sp), os);
}
#endif // GTEST_HAS_ABSL
inline void PrintTo(std::nullptr_t, ::std::ostream* os) { *os << "(nullptr)"; }
inline void PrintTo(std::nullptr_t, ::std::ostream *os)
{
*os << "(nullptr)";
}
template <typename T>
void PrintTo(std::reference_wrapper<T> ref, ::std::ostream* os) {
template <typename T> void PrintTo(std::reference_wrapper<T> ref, ::std::ostream *os)
{
UniversalPrinter<T &>::Print(ref.get(), os);
}
// Helper function for printing a tuple. T must be instantiated with
// a tuple type.
template <typename T>
void PrintTupleTo(const T&, std::integral_constant<size_t, 0>,
::std::ostream*) {}
template <typename T> void PrintTupleTo(const T &, std::integral_constant<size_t, 0>, ::std::ostream *)
{
}
template <typename T, size_t I>
void PrintTupleTo(const T& t, std::integral_constant<size_t, I>,
::std::ostream* os) {
template <typename T, size_t I> void PrintTupleTo(const T &t, std::integral_constant<size_t, I>, ::std::ostream *os)
{
PrintTupleTo(t, std::integral_constant<size_t, I - 1>(), os);
GTEST_INTENTIONAL_CONST_COND_PUSH_()
if (I > 1) {
if (I > 1)
{
GTEST_INTENTIONAL_CONST_COND_POP_()
*os << ", ";
}
UniversalPrinter<typename std::tuple_element<I - 1, T>::type>::Print(
std::get<I - 1>(t), os);
UniversalPrinter<typename std::tuple_element<I - 1, T>::type>::Print(std::get<I - 1>(t), os);
}
template <typename... Types>
void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
template <typename... Types> void PrintTo(const ::std::tuple<Types...> &t, ::std::ostream *os)
{
*os << "(";
PrintTupleTo(t, std::integral_constant<size_t, sizeof...(Types)>(), os);
*os << ")";
}
// Overload for std::pair.
template <typename T1, typename T2>
void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
template <typename T1, typename T2> void PrintTo(const ::std::pair<T1, T2> &value, ::std::ostream *os)
{
*os << '(';
// We cannot use UniversalPrint(value.first, os) here, as T1 may be
// a reference type. The same for printing value.second.
@ -657,8 +678,8 @@ void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
// Implements printing a non-reference type T by letting the compiler
// pick the right overload of PrintTo() for T.
template <typename T>
class UniversalPrinter {
template <typename T> class UniversalPrinter
{
public:
// MSVC warns about adding const to a function type, so we want to
// disable the warning.
@ -667,7 +688,8 @@ class UniversalPrinter {
// Note: we deliberately don't call this PrintTo(), as that name
// conflicts with ::testing::internal::PrintTo in the body of the
// function.
static void Print(const T& value, ::std::ostream* os) {
static void Print(const T &value, ::std::ostream *os)
{
// By default, ::testing::internal::PrintTo() is used for printing
// the value.
//
@ -686,14 +708,18 @@ class UniversalPrinter {
// Printer for absl::optional
template <typename T>
class UniversalPrinter<::absl::optional<T>> {
template <typename T> class UniversalPrinter<::absl::optional<T>>
{
public:
static void Print(const ::absl::optional<T>& value, ::std::ostream* os) {
static void Print(const ::absl::optional<T> &value, ::std::ostream *os)
{
*os << '(';
if (!value) {
if (!value)
{
*os << "nullopt";
} else {
}
else
{
UniversalPrint(*value, os);
}
*os << ')';
@ -702,19 +728,21 @@ class UniversalPrinter<::absl::optional<T>> {
// Printer for absl::variant
template <typename... T>
class UniversalPrinter<::absl::variant<T...>> {
template <typename... T> class UniversalPrinter<::absl::variant<T...>>
{
public:
static void Print(const ::absl::variant<T...>& value, ::std::ostream* os) {
static void Print(const ::absl::variant<T...> &value, ::std::ostream *os)
{
*os << '(';
absl::visit(Visitor{os}, value);
*os << ')';
}
private:
struct Visitor {
template <typename U>
void operator()(const U& u) const {
struct Visitor
{
template <typename U> void operator()(const U &u) const
{
*os << "'" << GetTypeName<U>() << "' with value ";
UniversalPrint(u, os);
}
@ -726,20 +754,26 @@ class UniversalPrinter<::absl::variant<T...>> {
// UniversalPrintArray(begin, len, os) prints an array of 'len'
// elements, starting at address 'begin'.
template <typename T>
void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
if (len == 0) {
template <typename T> void UniversalPrintArray(const T *begin, size_t len, ::std::ostream *os)
{
if (len == 0)
{
*os << "{}";
} else {
}
else
{
*os << "{ ";
const size_t kThreshold = 18;
const size_t kChunkSize = 8;
// If the array has more than kThreshold elements, we'll have to
// omit some details by printing only the first and the last
// kChunkSize elements.
if (len <= kThreshold) {
if (len <= kThreshold)
{
PrintRawArrayTo(begin, len, os);
} else {
}
else
{
PrintRawArrayTo(begin, kChunkSize, os);
*os << ", ..., ";
PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
@ -748,33 +782,33 @@ void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
}
}
// This overload prints a (const) char array compactly.
GTEST_API_ void UniversalPrintArray(
const char* begin, size_t len, ::std::ostream* os);
GTEST_API_ void UniversalPrintArray(const char *begin, size_t len, ::std::ostream *os);
// This overload prints a (const) wchar_t array compactly.
GTEST_API_ void UniversalPrintArray(
const wchar_t* begin, size_t len, ::std::ostream* os);
GTEST_API_ void UniversalPrintArray(const wchar_t *begin, size_t len, ::std::ostream *os);
// Implements printing an array type T[N].
template <typename T, size_t N>
class UniversalPrinter<T[N]> {
template <typename T, size_t N> class UniversalPrinter<T[N]>
{
public:
// Prints the given array, omitting some elements when there are too
// many.
static void Print(const T (&a)[N], ::std::ostream* os) {
static void Print(const T (&a)[N], ::std::ostream *os)
{
UniversalPrintArray(a, N, os);
}
};
// Implements printing a reference type T&.
template <typename T>
class UniversalPrinter<T&> {
template <typename T> class UniversalPrinter<T &>
{
public:
// MSVC warns about adding const to a function type, so we want to
// disable the warning.
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
static void Print(const T& value, ::std::ostream* os) {
static void Print(const T &value, ::std::ostream *os)
{
// Prints the address of the value. We use reinterpret_cast here
// as static_cast doesn't compile when T is a function type.
*os << "@" << reinterpret_cast<const void *>(&value) << " ";
@ -790,70 +824,83 @@ class UniversalPrinter<T&> {
// (but not the address) is printed; for a (const) char pointer, the
// NUL-terminated string (but not the pointer) is printed.
template <typename T>
class UniversalTersePrinter {
template <typename T> class UniversalTersePrinter
{
public:
static void Print(const T& value, ::std::ostream* os) {
static void Print(const T &value, ::std::ostream *os)
{
UniversalPrint(value, os);
}
};
template <typename T>
class UniversalTersePrinter<T&> {
template <typename T> class UniversalTersePrinter<T &>
{
public:
static void Print(const T& value, ::std::ostream* os) {
static void Print(const T &value, ::std::ostream *os)
{
UniversalPrint(value, os);
}
};
template <typename T, size_t N>
class UniversalTersePrinter<T[N]> {
template <typename T, size_t N> class UniversalTersePrinter<T[N]>
{
public:
static void Print(const T (&value)[N], ::std::ostream* os) {
static void Print(const T (&value)[N], ::std::ostream *os)
{
UniversalPrinter<T[N]>::Print(value, os);
}
};
template <>
class UniversalTersePrinter<const char*> {
template <> class UniversalTersePrinter<const char *>
{
public:
static void Print(const char* str, ::std::ostream* os) {
if (str == nullptr) {
static void Print(const char *str, ::std::ostream *os)
{
if (str == nullptr)
{
*os << "NULL";
} else {
}
else
{
UniversalPrint(std::string(str), os);
}
}
};
template <>
class UniversalTersePrinter<char*> {
template <> class UniversalTersePrinter<char *>
{
public:
static void Print(char* str, ::std::ostream* os) {
static void Print(char *str, ::std::ostream *os)
{
UniversalTersePrinter<const char *>::Print(str, os);
}
};
#if GTEST_HAS_STD_WSTRING
template <>
class UniversalTersePrinter<const wchar_t*> {
template <> class UniversalTersePrinter<const wchar_t *>
{
public:
static void Print(const wchar_t* str, ::std::ostream* os) {
if (str == nullptr) {
static void Print(const wchar_t *str, ::std::ostream *os)
{
if (str == nullptr)
{
*os << "NULL";
} else {
}
else
{
UniversalPrint(::std::wstring(str), os);
}
}
};
#endif
template <>
class UniversalTersePrinter<wchar_t*> {
template <> class UniversalTersePrinter<wchar_t *>
{
public:
static void Print(wchar_t* str, ::std::ostream* os) {
static void Print(wchar_t *str, ::std::ostream *os)
{
UniversalTersePrinter<const wchar_t *>::Print(str, os);
}
};
template <typename T>
void UniversalTersePrint(const T& value, ::std::ostream* os) {
template <typename T> void UniversalTersePrint(const T &value, ::std::ostream *os)
{
UniversalTersePrinter<T>::Print(value, os);
}
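
The terse/full distinction that the next hunk's comment describes is easiest to see with a char pointer; a minimal sketch (the exact pointer formatting is implementation-defined):

#include <iostream>
#include <sstream>
#include "gtest/gtest-printers.h"

int main() {
  const char *s = "hello";
  std::stringstream terse, full;
  ::testing::internal::UniversalTersePrint(s, &terse);  // just "hello"
  ::testing::internal::UniversalPrint(s, &full);        // e.g. 0x... pointing to "hello"
  std::cout << terse.str() << "\n" << full.str() << "\n";
  return 0;
}
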
@ -861,8 +908,8 @@ void UniversalTersePrint(const T& value, ::std::ostream* os) {
// difference between this and UniversalTersePrint() is that for a
// (const) char pointer, this prints both the pointer and the
// NUL-terminated string.
template <typename T>
void UniversalPrint(const T& value, ::std::ostream* os) {
template <typename T> void UniversalPrint(const T &value, ::std::ostream *os)
{
// A workaround for the bug in VC++ 7.1 that prevents us from instantiating
// UniversalPrinter with T directly.
typedef T T1;
@ -873,15 +920,13 @@ typedef ::std::vector< ::std::string> Strings;
// Tersely prints the first N fields of a tuple to a string vector,
// one element for each field.
template <typename Tuple>
void TersePrintPrefixToStrings(const Tuple&, std::integral_constant<size_t, 0>,
Strings*) {}
template <typename Tuple> void TersePrintPrefixToStrings(const Tuple &, std::integral_constant<size_t, 0>, Strings *)
{
}
template <typename Tuple, size_t I>
void TersePrintPrefixToStrings(const Tuple& t,
std::integral_constant<size_t, I>,
Strings* strings) {
TersePrintPrefixToStrings(t, std::integral_constant<size_t, I - 1>(),
strings);
void TersePrintPrefixToStrings(const Tuple &t, std::integral_constant<size_t, I>, Strings *strings)
{
TersePrintPrefixToStrings(t, std::integral_constant<size_t, I - 1>(), strings);
::std::stringstream ss;
UniversalTersePrint(std::get<I - 1>(t), &ss);
strings->push_back(ss.str());
@ -890,29 +935,28 @@ void TersePrintPrefixToStrings(const Tuple& t,
// Prints the fields of a tuple tersely to a string vector, one
// element for each field. See the comment before
// UniversalTersePrint() for how we define "tersely".
template <typename Tuple>
Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
template <typename Tuple> Strings UniversalTersePrintTupleFieldsToStrings(const Tuple &value)
{
Strings result;
TersePrintPrefixToStrings(
value, std::integral_constant<size_t, std::tuple_size<Tuple>::value>(),
&result);
TersePrintPrefixToStrings(value, std::integral_constant<size_t, std::tuple_size<Tuple>::value>(), &result);
return result;
}
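
A minimal sketch of what this helper produces for a three-field tuple (values are illustrative):

#include <string>
#include <tuple>
#include "gtest/gtest-printers.h"

int main() {
  auto t = std::make_tuple(1, std::string("two"), 3.5);
  ::testing::internal::Strings fields =
      ::testing::internal::UniversalTersePrintTupleFieldsToStrings(t);
  // fields now holds one tersely printed element per tuple field:
  // {"1", "\"two\"", "3.5"}
  return 0;
}
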
} // namespace internal
#if GTEST_HAS_ABSL
namespace internal2 {
namespace internal2
{
template <typename T>
void TypeWithoutFormatter<T, kConvertibleToStringView>::PrintValue(
const T& value, ::std::ostream* os) {
void TypeWithoutFormatter<T, kConvertibleToStringView>::PrintValue(const T &value, ::std::ostream *os)
{
internal::PrintTo(absl::string_view(value), os);
}
} // namespace internal2
#endif
template <typename T>
::std::string PrintToString(const T& value) {
template <typename T> ::std::string PrintToString(const T &value)
{
::std::stringstream ss;
internal::UniversalTersePrinter<T>::Print(value, &ss);
return ss.str();
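
PrintToString() is the public entry point for all of the machinery above; a minimal usage sketch (container contents are illustrative):

#include <iostream>
#include <string>
#include <utility>
#include <vector>
#include "gtest/gtest-printers.h"

int main() {
  std::vector<std::pair<int, std::string>> v = {{1, "one"}, {2, "two"}};
  // Typically prints: { (1, "one"), (2, "two") }
  std::cout << ::testing::PrintToString(v) << "\n";
  return 0;
}
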
View File
@ -38,10 +38,10 @@
#include "gtest/gtest.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
/* class A needs to have dll-interface to be used by clients of class B */)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
namespace testing {
namespace testing
{
// This helper class can be used to mock out Google Test failure reporting
// so that we can test Google Test or code that builds on Google Test.
@ -52,11 +52,12 @@ namespace testing {
// generated in the same thread that created this object or it can intercept
// all generated failures. The scope of this mock object can be controlled with
// the second argument to the two arguments constructor.
class GTEST_API_ ScopedFakeTestPartResultReporter
: public TestPartResultReporterInterface {
class GTEST_API_ ScopedFakeTestPartResultReporter : public TestPartResultReporterInterface
{
public:
// The two possible mocking modes of this object.
enum InterceptMode {
enum InterceptMode
{
INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
INTERCEPT_ALL_THREADS // Intercepts all failures.
};
@ -68,8 +69,7 @@ class GTEST_API_ ScopedFakeTestPartResultReporter
explicit ScopedFakeTestPartResultReporter(TestPartResultArray *result);
// Same as above, but you can choose the interception scope of this object.
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
TestPartResultArray* result);
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, TestPartResultArray *result);
// The d'tor restores the previous test part result reporter.
~ScopedFakeTestPartResultReporter() override;
@ -91,19 +91,21 @@ class GTEST_API_ ScopedFakeTestPartResultReporter
GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
};
namespace internal {
namespace internal
{
// A helper class for implementing EXPECT_FATAL_FAILURE() and
// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
// TestPartResultArray contains exactly one failure that has the given
// type and contains the given substring. If that's not the case, a
// non-fatal failure will be generated.
class GTEST_API_ SingleFailureChecker {
class GTEST_API_ SingleFailureChecker
{
public:
// The constructor remembers the arguments.
SingleFailureChecker(const TestPartResultArray* results,
TestPartResult::Type type, const std::string& substr);
SingleFailureChecker(const TestPartResultArray *results, TestPartResult::Type type, const std::string &substr);
~SingleFailureChecker();
private:
const TestPartResultArray *const results_;
const TestPartResult::Type type_;
@ -142,35 +144,43 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
// gtest_unittest.cc will fail to compile if we do that.
#define EXPECT_FATAL_FAILURE(statement, substr) \
do { \
class GTestExpectFatalFailureHelper {\
do \
{ \
class GTestExpectFatalFailureHelper \
{ \
public: \
static void Execute() { statement; }\
static void Execute() \
{ \
statement; \
} \
}; \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
::testing::TestPartResult::kFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
GTestExpectFatalFailureHelper::Execute(); \
} \
} while (::testing::internal::AlwaysFalse())
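
A minimal usage sketch for the macro above, assuming the target links against gtest_main (the test and function names are illustrative):

#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"

static void Fails() {
  FAIL() << "boom";  // raises a fatal failure
}

TEST(SpiDemo, CatchesFatalFailure) {
  // Passes only if Fails() raises exactly one fatal failure whose
  // message contains "boom"; the failure itself is intercepted.
  EXPECT_FATAL_FAILURE(Fails(), "boom");
}
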
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do { \
class GTestExpectFatalFailureHelper {\
do \
{ \
class GTestExpectFatalFailureHelper \
{ \
public: \
static void Execute() { statement; }\
static void Execute() \
{ \
statement; \
} \
}; \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
::testing::TestPartResult::kFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ALL_THREADS, &gtest_failures);\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
GTestExpectFatalFailureHelper::Execute(); \
} \
} while (::testing::internal::AlwaysFalse())
@ -208,30 +218,34 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
// to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
do {\
do \
{ \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker( \
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
(substr));\
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
if (::testing::internal::AlwaysTrue()) { statement; }\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
if (::testing::internal::AlwaysTrue()) \
{ \
statement; \
} \
} \
} while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do {\
do \
{ \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker( \
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
(substr));\
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
&gtest_failures);\
if (::testing::internal::AlwaysTrue()) { statement; }\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
if (::testing::internal::AlwaysTrue()) \
{ \
statement; \
} \
} \
} while (::testing::internal::AlwaysFalse())
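
And the non-fatal counterpart, under the same assumptions:

#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"

TEST(SpiDemo, CatchesNonfatalFailure) {
  // ADD_FAILURE() is non-fatal; the macro asserts that exactly one such
  // failure containing the given substring was produced.
  EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "soft error", "soft error");
}
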
View File
@ -32,25 +32,27 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#include <iosfwd>
#include <vector>
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-string.h"
#include <iosfwd>
#include <vector>
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
/* class A needs to have dll-interface to be used by clients of class B */)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
namespace testing {
namespace testing
{
// A copyable object representing the result of a test part (i.e. an
// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
//
// Don't inherit from TestPartResult as its destructor is not virtual.
class GTEST_API_ TestPartResult {
class GTEST_API_ TestPartResult
{
public:
// The possible outcomes of a test part (i.e. an assertion or an
// explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
enum Type {
enum Type
{
kSuccess, // Succeeded.
kNonFatalFailure, // Failed but the test can continue.
kFatalFailure, // Failed and the test should be terminated.
@ -60,47 +62,73 @@ class GTEST_API_ TestPartResult {
// C'tor. TestPartResult does NOT have a default constructor.
// Always use this constructor (with parameters) to create a
// TestPartResult object.
TestPartResult(Type a_type, const char* a_file_name, int a_line_number,
const char* a_message)
: type_(a_type),
file_name_(a_file_name == nullptr ? "" : a_file_name),
line_number_(a_line_number),
summary_(ExtractSummary(a_message)),
message_(a_message) {}
TestPartResult(Type a_type, const char *a_file_name, int a_line_number, const char *a_message)
: type_(a_type), file_name_(a_file_name == nullptr ? "" : a_file_name), line_number_(a_line_number),
summary_(ExtractSummary(a_message)), message_(a_message)
{
}
// Gets the outcome of the test part.
Type type() const { return type_; }
Type type() const
{
return type_;
}
// Gets the name of the source file where the test part took place, or
// NULL if it's unknown.
const char* file_name() const {
const char *file_name() const
{
return file_name_.empty() ? nullptr : file_name_.c_str();
}
// Gets the line in the source file where the test part took place,
// or -1 if it's unknown.
int line_number() const { return line_number_; }
int line_number() const
{
return line_number_;
}
// Gets the summary of the failure message.
const char* summary() const { return summary_.c_str(); }
const char *summary() const
{
return summary_.c_str();
}
// Gets the message associated with the test part.
const char* message() const { return message_.c_str(); }
const char *message() const
{
return message_.c_str();
}
// Returns true if and only if the test part was skipped.
bool skipped() const { return type_ == kSkip; }
bool skipped() const
{
return type_ == kSkip;
}
// Returns true if and only if the test part passed.
bool passed() const { return type_ == kSuccess; }
bool passed() const
{
return type_ == kSuccess;
}
// Returns true if and only if the test part non-fatally failed.
bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
bool nonfatally_failed() const
{
return type_ == kNonFatalFailure;
}
// Returns true if and only if the test part fatally failed.
bool fatally_failed() const { return type_ == kFatalFailure; }
bool fatally_failed() const
{
return type_ == kFatalFailure;
}
// Returns true if and only if the test part failed.
bool failed() const { return fatally_failed() || nonfatally_failed(); }
bool failed() const
{
return fatally_failed() || nonfatally_failed();
}
private:
Type type_;
@ -126,9 +154,12 @@ std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
//
// Don't inherit from TestPartResultArray as its destructor is not
// virtual.
class GTEST_API_ TestPartResultArray {
class GTEST_API_ TestPartResultArray
{
public:
TestPartResultArray() {}
TestPartResultArray()
{
}
// Appends the given TestPartResult to the array.
void Append(const TestPartResult &result);
@ -146,14 +177,18 @@ class GTEST_API_ TestPartResultArray {
};
// This interface knows how to report a test part result.
class GTEST_API_ TestPartResultReporterInterface {
class GTEST_API_ TestPartResultReporterInterface
{
public:
virtual ~TestPartResultReporterInterface() {}
virtual ~TestPartResultReporterInterface()
{
}
virtual void ReportTestPartResult(const TestPartResult &result) = 0;
};
namespace internal {
namespace internal
{
// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
// statement generates new fatal failures. To do so it registers itself as the
@ -161,13 +196,17 @@ namespace internal {
// reported, it only delegates the reporting to the former result reporter.
// The original result reporter is restored in the destructor.
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
class GTEST_API_ HasNewFatalFailureHelper
: public TestPartResultReporterInterface {
class GTEST_API_ HasNewFatalFailureHelper : public TestPartResultReporterInterface
{
public:
HasNewFatalFailureHelper();
~HasNewFatalFailureHelper() override;
void ReportTestPartResult(const TestPartResult &result) override;
bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
bool has_new_fatal_failure() const
{
return has_new_fatal_failure_;
}
private:
bool has_new_fatal_failure_;
TestPartResultReporterInterface *original_reporter_;
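
A minimal sketch of constructing and inspecting a TestPartResult directly (the file name, line, and message are illustrative):

#include <iostream>
#include "gtest/gtest-test-part.h"

int main() {
  ::testing::TestPartResult r(::testing::TestPartResult::kNonFatalFailure,
                              "demo.cc", 42, "expected 1, got 2");
  // The accessors mirror the constructor arguments.
  std::cout << r.file_name() << ":" << r.line_number() << " "
            << (r.failed() ? "failed" : "passed") << ": "
            << r.summary() << "\n";
  return 0;
}
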
View File
@ -27,7 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// GOOGLETEST_CM0001 DO NOT DELETE
#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
@ -185,41 +184,29 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// Expands to the name of the typedef for the NameGenerator, responsible for
// creating the suffixes of the name.
#define GTEST_NAME_GENERATOR_(TestSuiteName) \
gtest_type_params_##TestSuiteName##_NameGenerator
#define GTEST_NAME_GENERATOR_(TestSuiteName) gtest_type_params_##TestSuiteName##_NameGenerator
#define TYPED_TEST_SUITE(CaseName, Types, ...) \
typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_( \
CaseName); \
typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type \
GTEST_NAME_GENERATOR_(CaseName)
typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_(CaseName); \
typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type GTEST_NAME_GENERATOR_(CaseName)
#define TYPED_TEST(CaseName, TestName) \
template <typename gtest_TypeParam_> \
class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
: public CaseName<gtest_TypeParam_> { \
class GTEST_TEST_CLASS_NAME_(CaseName, TestName) : public CaseName<gtest_TypeParam_> \
{ \
private: \
typedef CaseName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \
}; \
static bool gtest_##CaseName##_##TestName##_registered_ \
GTEST_ATTRIBUTE_UNUSED_ = \
static bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTest< \
CaseName, \
::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, \
TestName)>, \
GTEST_TYPE_PARAMS_( \
CaseName)>::Register("", \
::testing::internal::CodeLocation( \
__FILE__, __LINE__), \
#CaseName, #TestName, 0, \
::testing::internal::GenerateNames< \
GTEST_NAME_GENERATOR_(CaseName), \
GTEST_TYPE_PARAMS_(CaseName)>()); \
template <typename gtest_TypeParam_> \
void GTEST_TEST_CLASS_NAME_(CaseName, \
TestName)<gtest_TypeParam_>::TestBody()
CaseName, ::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
GTEST_TYPE_PARAMS_(CaseName)>:: \
Register( \
"", ::testing::internal::CodeLocation(__FILE__, __LINE__), #CaseName, #TestName, 0, \
::testing::internal::GenerateNames<GTEST_NAME_GENERATOR_(CaseName), GTEST_TYPE_PARAMS_(CaseName)>()); \
template <typename gtest_TypeParam_> void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
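
A minimal sketch of the TYPED_TEST flow these macros implement (the fixture, type list, and expectation are illustrative):

#include "gtest/gtest.h"

template <typename T>
class CounterTest : public ::testing::Test {};

using MyTypes = ::testing::Types<int, long, unsigned>;
TYPED_TEST_SUITE(CounterTest, MyTypes);

TYPED_TEST(CounterTest, StartsAtZero) {
  // TypeParam is rebound to each element of MyTypes in turn.
  TypeParam zero = TypeParam();
  EXPECT_EQ(zero, TypeParam(0));
}
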
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
@ -245,22 +232,19 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
//
// Expands to the name of the variable used to remember the names of
// the defined tests in the given test suite.
#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) \
gtest_typed_test_suite_p_state_##TestSuiteName##_
#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) gtest_typed_test_suite_p_state_##TestSuiteName##_
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
//
// Expands to the name of the variable used to remember the names of
// the registered tests in the given test suite.
#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) \
gtest_registered_test_names_##TestSuiteName##_
#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) gtest_registered_test_names_##TestSuiteName##_
// The variables defined in the type-parameterized test macros are
// static as typically these macros are used in a .h file that can be
// #included in multiple translation units linked together.
#define TYPED_TEST_SUITE_P(SuiteName) \
static ::testing::internal::TypedTestSuitePState \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
static ::testing::internal::TypedTestSuitePState GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
@ -270,58 +254,48 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_P(SuiteName, TestName) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \
template <typename gtest_TypeParam_> \
class TestName : public SuiteName<gtest_TypeParam_> { \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
{ \
template <typename gtest_TypeParam_> class TestName : public SuiteName<gtest_TypeParam_> \
{ \
private: \
typedef SuiteName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \
}; \
static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName( \
__FILE__, __LINE__, #SuiteName, #TestName); \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName(__FILE__, __LINE__, #SuiteName, #TestName); \
} \
template <typename gtest_TypeParam_> \
void GTEST_SUITE_NAMESPACE_( \
SuiteName)::TestName<gtest_TypeParam_>::TestBody()
template <typename gtest_TypeParam_> void GTEST_SUITE_NAMESPACE_(SuiteName)::TestName<gtest_TypeParam_>::TestBody()
#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
{ \
typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
} \
static const char* const GTEST_REGISTERED_TEST_NAMES_( \
SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames( \
__FILE__, __LINE__, #__VA_ARGS__)
static const char *const GTEST_REGISTERED_TEST_NAMES_(SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames(__FILE__, __LINE__, #__VA_ARGS__)
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define REGISTER_TYPED_TEST_CASE_P \
static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), \
""); \
static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), ""); \
REGISTER_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \
static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTestSuite< \
SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
::testing::internal::TypeParameterizedTestSuite<SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
::testing::internal::TypeList<Types>::type>:: \
Register(#Prefix, \
::testing::internal::CodeLocation(__FILE__, __LINE__), \
&GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, \
GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
::testing::internal::GenerateNames< \
::testing::internal::NameGeneratorSelector< \
__VA_ARGS__>::type, \
Register(#Prefix, ::testing::internal::CodeLocation(__FILE__, __LINE__), \
&GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
::testing::internal::GenerateNames<::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type, \
::testing::internal::TypeList<Types>::type>())
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_CASE_P \
static_assert( \
::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
static_assert(::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
INSTANTIATE_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
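
For completeness, a minimal sketch of the type-parameterized (_P) flow defined above, where the suite is declared once and instantiated per type list (all names are illustrative):

#include "gtest/gtest.h"

template <typename T>
class QueueTest : public ::testing::Test {};

TYPED_TEST_SUITE_P(QueueTest);

TYPED_TEST_P(QueueTest, DefaultConstructs) {
  TypeParam value{};
  (void)value;  // only checks that TypeParam is default-constructible
}

REGISTER_TYPED_TEST_SUITE_P(QueueTest, DefaultConstructs);

using QueueTypes = ::testing::Types<int, double>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, QueueTest, QueueTypes);
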
File diff suppressed because it is too large

View File
@ -38,7 +38,8 @@
#include "gtest/gtest.h"
namespace testing {
namespace testing
{
// This header implements a family of generic predicate assertion
// macros:
@ -79,65 +80,43 @@ namespace testing {
else \
on_failure(gtest_ar.failure_message())
// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
// this in your code.
template <typename Pred,
typename T1>
AssertionResult AssertPred1Helper(const char* pred_text,
const char* e1,
Pred pred,
const T1& v1) {
if (pred(v1)) return AssertionSuccess();
template <typename Pred, typename T1>
AssertionResult AssertPred1Helper(const char *pred_text, const char *e1, Pred pred, const T1 &v1)
{
if (pred(v1))
return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ") evaluates to false, where"
return AssertionFailure() << pred_text << "(" << e1 << ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1);
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
// Don't use this in your code.
#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
GTEST_ASSERT_(pred_format(#v1, v1), \
on_failure)
#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure) GTEST_ASSERT_(pred_format(#v1, v1), on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
// this in your code.
#define GTEST_PRED1_(pred, v1, on_failure)\
GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
#v1, \
pred, \
v1), on_failure)
#define GTEST_PRED1_(pred, v1, on_failure) GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, #v1, pred, v1), on_failure)
// Unary predicate assertion macros.
#define EXPECT_PRED_FORMAT1(pred_format, v1) \
GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED1(pred, v1) \
GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT1(pred_format, v1) \
GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED1(pred, v1) \
GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
#define EXPECT_PRED_FORMAT1(pred_format, v1) GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT1(pred_format, v1) GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED1(pred, v1) GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
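
A minimal usage sketch for the unary macros (the predicate and value are illustrative):

#include "gtest/gtest.h"

static bool IsEven(int n) {
  return n % 2 == 0;
}

TEST(PredDemo, Unary) {
  // On failure this reports the expression text and its value, e.g.
  // "IsEven(n) evaluates to false, where n evaluates to 3".
  EXPECT_PRED1(IsEven, 4);
}
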
// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
// this in your code.
template <typename Pred,
typename T1,
typename T2>
AssertionResult AssertPred2Helper(const char* pred_text,
const char* e1,
const char* e2,
Pred pred,
const T1& v1,
const T2& v2) {
if (pred(v1, v2)) return AssertionSuccess();
template <typename Pred, typename T1, typename T2>
AssertionResult AssertPred2Helper(const char *pred_text, const char *e1, const char *e2, Pred pred, const T1 &v1,
const T2 &v2)
{
if (pred(v1, v2))
return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2
<< ") evaluates to false, where"
return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2);
@ -145,51 +124,29 @@ AssertionResult AssertPred2Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
// Don't use this in your code.
#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
on_failure)
#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure) GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
// this in your code.
#define GTEST_PRED2_(pred, v1, v2, on_failure) \
GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
#v1, \
#v2, \
pred, \
v1, \
v2), on_failure)
GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, #v1, #v2, pred, v1, v2), on_failure)
// Binary predicate assertion macros.
#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED2(pred, v1, v2) \
GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED2(pred, v1, v2) \
GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED2(pred, v1, v2) GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED2(pred, v1, v2) GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
// this in your code.
template <typename Pred,
typename T1,
typename T2,
typename T3>
AssertionResult AssertPred3Helper(const char* pred_text,
const char* e1,
const char* e2,
const char* e3,
Pred pred,
const T1& v1,
const T2& v2,
const T3& v3) {
if (pred(v1, v2, v3)) return AssertionSuccess();
template <typename Pred, typename T1, typename T2, typename T3>
AssertionResult AssertPred3Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, Pred pred,
const T1 &v1, const T2 &v2, const T3 &v3)
{
if (pred(v1, v2, v3))
return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3
<< ") evaluates to false, where"
return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
@ -199,54 +156,30 @@ AssertionResult AssertPred3Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
// Don't use this in your code.
#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
on_failure)
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
// this in your code.
#define GTEST_PRED3_(pred, v1, v2, v3, on_failure) \
GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
#v1, \
#v2, \
#v3, \
pred, \
v1, \
v2, \
v3), on_failure)
GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, #v1, #v2, #v3, pred, v1, v2, v3), on_failure)
// Ternary predicate assertion macros.
#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED3(pred, v1, v2, v3) \
GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED3(pred, v1, v2, v3) \
GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define EXPECT_PRED3(pred, v1, v2, v3) GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED3(pred, v1, v2, v3) GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
// this in your code.
template <typename Pred,
typename T1,
typename T2,
typename T3,
typename T4>
AssertionResult AssertPred4Helper(const char* pred_text,
const char* e1,
const char* e2,
const char* e3,
const char* e4,
Pred pred,
const T1& v1,
const T2& v2,
const T3& v3,
const T4& v4) {
if (pred(v1, v2, v3, v4)) return AssertionSuccess();
template <typename Pred, typename T1, typename T2, typename T3, typename T4>
AssertionResult AssertPred4Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, const char *e4,
Pred pred, const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4)
{
if (pred(v1, v2, v3, v4))
return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
<< ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
@ -258,60 +191,33 @@ AssertionResult AssertPred4Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
// Don't use this in your code.
#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
on_failure)
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
// this in your code.
#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure) \
GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
#v1, \
#v2, \
#v3, \
#v4, \
pred, \
v1, \
v2, \
v3, \
v4), on_failure)
GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, #v1, #v2, #v3, #v4, pred, v1, v2, v3, v4), on_failure)
// 4-ary predicate assertion macros.
#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED4(pred, v1, v2, v3, v4) GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED4(pred, v1, v2, v3, v4) GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
// this in your code.
template <typename Pred,
typename T1,
typename T2,
typename T3,
typename T4,
typename T5>
AssertionResult AssertPred5Helper(const char* pred_text,
const char* e1,
const char* e2,
const char* e3,
const char* e4,
const char* e5,
Pred pred,
const T1& v1,
const T2& v2,
const T3& v3,
const T4& v4,
const T5& v5) {
if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
template <typename Pred, typename T1, typename T2, typename T3, typename T4, typename T5>
AssertionResult AssertPred5Helper(const char *pred_text, const char *e1, const char *e2, const char *e3, const char *e4,
const char *e5, Pred pred, const T1 &v1, const T2 &v2, const T3 &v3, const T4 &v4,
const T5 &v5)
{
if (pred(v1, v2, v3, v4, v5))
return AssertionSuccess();
return AssertionFailure()
<< pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
<< ", " << e5 << ") evaluates to false, where"
return AssertionFailure() << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4 << ", " << e5
<< ") evaluates to false, where"
<< "\n"
<< e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
<< e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
@ -323,36 +229,20 @@ AssertionResult AssertPred5Helper(const char* pred_text,
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
// Don't use this in your code.
#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
on_failure)
GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
// this in your code.
#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \
GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
#v1, \
#v2, \
#v3, \
#v4, \
#v5, \
pred, \
v1, \
v2, \
v3, \
v4, \
v5), on_failure)
GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, pred, v1, v2, v3, v4, v5), on_failure)
// 5-ary predicate assertion macros.
#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
} // namespace testing
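
A minimal end-to-end sketch for the binary form, which mirrors the kind of predicate these macros are designed for (names are illustrative):

#include "gtest/gtest.h"

static bool MutuallyPrime(int m, int n) {
  for (int d = 2; d <= m && d <= n; ++d)
    if (m % d == 0 && n % d == 0) return false;
  return true;
}

TEST(PredDemo, Binary) {
  // On failure, both argument expressions and their values are reported,
  // which is far more informative than EXPECT_TRUE(MutuallyPrime(a, b)).
  EXPECT_PRED2(MutuallyPrime, 8, 9);
}
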
Some files were not shown because too many files have changed in this diff