Merge Gitee PR#21 from upd/1.5.0
ShikiSuen 2022-04-11 16:12:23 +00:00 committed by Gitee
commit 61d1e4ab8e
507 changed files with 81528 additions and 64975 deletions


@ -21,7 +21,7 @@
"AmbiguousTrailingClosureOverload" : true,
"BeginDocumentationCommentWithOneLineSummary" : false,
"DoNotUseSemicolons" : true,
"DontRepeatTypeInStaticProperties" : true,
"DontRepeatTypeInStaticProperties" : false,
"FileScopedDeclarationPrivacy" : true,
"FullyIndirectEnum" : true,
"GroupNumericLiterals" : true,
@ -39,7 +39,7 @@
"NoVoidReturnOnFunctionSignature" : true,
"OneCasePerLine" : true,
"OneVariableDeclarationPerLine" : true,
"OnlyOneTrailingClosureArgument" : true,
"OnlyOneTrailingClosureArgument" : false,
"OrderedImports" : true,
"ReturnVoidInsteadOfEmptyTuple" : true,
"UseEarlyExits" : false,
@ -47,7 +47,7 @@
"UseShorthandTypeNames" : true,
"UseSingleLinePropertyGetter" : true,
"UseSynthesizedInitializer" : true,
"UseTripleSlashForDocumentationComments" : true,
"UseTripleSlashForDocumentationComments" : false,
"UseWhereClausesInForLoops" : false,
"ValidateDocumentationComments" : false
},

BuildVersionSpecifier.swift Executable file (85 lines added)

@ -0,0 +1,85 @@
#!/usr/bin/env swift
// Copyright (c) 2021 and onwards The vChewing Project (MIT-NTL License).
/*
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
1. The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
2. No trademark license is granted to use the trade names, trademarks, service
marks, or product names of Contributor, except as required to fulfill notice
requirements above.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
import Cocoa
extension String {
fileprivate mutating func regReplace(pattern: String, replaceWith: String = "") {
// Ref: https://stackoverflow.com/a/40993403/4162914 && https://stackoverflow.com/a/71291137/4162914
do {
let regex = try NSRegularExpression(
pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines])
let range = NSRange(self.startIndex..., in: self)
self = regex.stringByReplacingMatches(
in: self, options: [], range: range, withTemplate: replaceWith)
} catch { return }
}
}
var verMarket: String = "1.0.0"
var verBuild: String = "1000"
var strXcodeProjContent: String = ""
var dirXcodeProjectFile = "./vChewing.xcodeproj/project.pbxproj"
var dirPackageProjectFile = "./vChewing.pkgproj"
var dirUpdateInfoPlist = "./Update-Info.plist"
var theDictionary: NSDictionary?
if CommandLine.arguments.count == 3 {
verMarket = CommandLine.arguments[1]
verBuild = CommandLine.arguments[2]
// Xcode project file version update.
do {
strXcodeProjContent += try String(contentsOfFile: dirXcodeProjectFile, encoding: .utf8)
} catch {
NSLog(" - Exception happened when reading the Xcode project file.")
}
strXcodeProjContent.regReplace(
pattern: #"CURRENT_PROJECT_VERSION = .*$"#, replaceWith: "CURRENT_PROJECT_VERSION = " + verBuild + ";")
strXcodeProjContent.regReplace(
pattern: #"MARKETING_VERSION = .*$"#, replaceWith: "MARKETING_VERSION = " + verMarket + ";")
do {
try strXcodeProjContent.write(to: URL(fileURLWithPath: dirXcodeProjectFile), atomically: false, encoding: .utf8)
} catch {
NSLog(" -: Error on writing strings to file: \(error)")
}
NSLog(" - Xcode 專案版本資訊更新完成:\(verMarket) \(verBuild)")
// Packages project file version update.
theDictionary = NSDictionary(contentsOfFile: dirPackageProjectFile)
theDictionary?.setValue(verMarket, forKeyPath: "PACKAGES.PACKAGE_SETTINGS.VERSION")
theDictionary?.write(toFile: dirPackageProjectFile, atomically: true)
NSLog(" - Packages 專案版本資訊更新完成:\(verMarket) \(verBuild)")
// Update notification project file version update.
theDictionary = NSDictionary(contentsOfFile: dirUpdateInfoPlist)
theDictionary?.setValue(verBuild, forKeyPath: "CFBundleVersion")
theDictionary?.setValue(verMarket, forKeyPath: "CFBundleShortVersionString")
theDictionary?.write(toFile: dirUpdateInfoPlist, atomically: true)
NSLog(" - 更新用通知 plist 版本資訊更新完成:\(verMarket) \(verBuild)")
}
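The new script above is invoked with two arguments, which it treats as the marketing version and the build number, e.g. swift BuildVersionSpecifier.swift 1.5.0 1500 (the build number here is illustrative). The core trick is the regReplace helper, which rewrites whole matched lines in place through NSRegularExpression. A minimal standalone sketch of that technique, using a hypothetical pbxproj-style snippet rather than the real project.pbxproj:

import Foundation

// Sample input is hypothetical, not the actual project file.
var sample = """
CURRENT_PROJECT_VERSION = 1000;
MARKETING_VERSION = 1.0.0;
"""

func regReplace(_ text: inout String, pattern: String, with template: String) {
  // Anchored, case-insensitive, line-by-line replacement, mirroring the script above.
  guard
    let regex = try? NSRegularExpression(
      pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines])
  else { return }
  let range = NSRange(text.startIndex..., in: text)
  text = regex.stringByReplacingMatches(
    in: text, options: [], range: range, withTemplate: template)
}

regReplace(&sample, pattern: #"CURRENT_PROJECT_VERSION = .*$"#,
           with: "CURRENT_PROJECT_VERSION = 1500;")
regReplace(&sample, pattern: #"MARKETING_VERSION = .*$"#,
           with: "MARKETING_VERSION = 1.5.0;")
print(sample)
// CURRENT_PROJECT_VERSION = 1500;
// MARKETING_VERSION = 1.5.0;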


@ -40,11 +40,6 @@ extension String {
}
}
private func getDocumentsDirectory() -> URL {
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
return paths[0]
}
// MARK: -
// Ref: https://stackoverflow.com/a/32581409/4162914
extension Float {


@ -78,7 +78,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
return
}
self.installingVersion = installingVersion
self.archiveUtil = ArchiveUtil(appName: kTargetBin, targetAppBundleName: kTargetBundle)
archiveUtil = ArchiveUtil(appName: kTargetBin, targetAppBundleName: kTargetBundle)
_ = archiveUtil?.validateIfNotarizedArchiveExists()
cancelButton.nextKeyView = installButton
@ -153,7 +153,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
atPath: (kTargetPartialPath as NSString).expandingTildeInPath)
== false
{
self.installInputMethod(
installInputMethod(
previousExists: false, previousVersionNotFullyDeactivatedWarning: false)
return
}
@ -208,7 +208,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
timeInterval: kTranslocationRemovalTickInterval, target: self,
selector: #selector(timerTick(_:)), userInfo: nil, repeats: true)
} else {
self.installInputMethod(
installInputMethod(
previousExists: false, previousVersionNotFullyDeactivatedWarning: false)
}
}
@ -321,7 +321,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
ntfPostInstall.addButton(withTitle: NSLocalizedString("OK", comment: ""))
}
}
ntfPostInstall.beginSheetModal(for: window!) { response in
ntfPostInstall.beginSheetModal(for: window!) { _ in
self.endAppWithDelay()
}
}
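The installer changes in this hunk are stylistic: redundant explicit self receivers are dropped (archiveUtil = ..., installInputMethod(...)) and an unused closure parameter becomes _. Note that self.endAppWithDelay() keeps its explicit self because it sits inside an escaping completion handler, where Swift requires it. A minimal sketch of that rule, with hypothetical names unrelated to the installer:

import Foundation

final class Demo {
  var counter = 0

  func bump() { counter += 1 }

  func scheduleBump() {
    bump()  // inside an ordinary method, no explicit self is needed
    DispatchQueue.main.async {
      self.bump()  // escaping closure: explicit self (or a capture list) is required
    }
  }
}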


@ -83,7 +83,7 @@ struct ArchiveUtil {
}
func unzipNotarizedArchive() -> String? {
if !self.validateIfNotarizedArchiveExists() {
if !validateIfNotarizedArchiveExists() {
return nil
}
guard let notarizedArchive = notarizedArchive,


@ -17,3 +17,5 @@
"Warning" = "Warning";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources.";
"Continue" = "Continue";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -17,3 +17,5 @@
"Warning" = "お知らせ";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "入力アプリの自動起動はうまく出来なかったかもしれません。ご自分で「システム環境設定→キーボード→入力ソース」で起動してください。";
"Continue" = "続行";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -17,3 +17,5 @@
"Warning" = "安装不完整";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "输入法已经安装好,但可能没有完全启用。请从「系统偏好设定」 > 「键盘」 > 「输入方式」分页加入输入法。";
"Continue" = "继续";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -17,3 +17,5 @@
"Warning" = "安裝不完整";
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources." = "輸入法已經安裝好,但可能沒有完全啟用。請從「系統偏好設定」 > 「鍵盤」 > 「輸入方式」分頁加入輸入法。";
"Continue" = "繼續";
"%@ (for version %@, r%@)" = "%@ (for version %@, r%@)";


@ -1,7 +1,7 @@
#!/bin/sh
TARGET='vChewing'
login_user=`/usr/bin/stat -f%Su /dev/console`
login_user=$(/usr/bin/stat -f%Su /dev/console)
# First, copy the wrongfully installed contents to the right location:
cp -r /Library/Input\ Methods/"${TARGET}".app /Users/"${login_user}"/Library/Input\ Methods/ || true


@ -20,21 +20,19 @@ debug:
DSTROOT = /Library/Input Methods
VC_APP_ROOT = $(DSTROOT)/vChewing.app
.PHONY: clang-format lint
.PHONY: clang-format lint batchfix format
format: batchfix clang-format lint
clang-format:
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./DataCompiler/
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./Installer/
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./Source/
@swift-format format --in-place --configuration ./.clang-format-swift.json --recursive ./UserPhraseEditor/
@find ./Installer/ -iname '*.h' -o -iname '*.m' | xargs clang-format -i -style=Microsoft
@find ./Source/3rdParty/OVMandarin -iname '*.h' -o -iname '*.cpp' -o -iname '*.mm' -o -iname '*.m' | xargs clang-format -i -style=Microsoft
@find ./Source/Modules/ -iname '*.h' -o -iname '*.cpp' -o -iname '*.mm' -o -iname '*.m' | xargs clang-format -i -style=Microsoft
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swift-format format --in-place --configuration ./.clang-format-swift.json --parallel
@git ls-files --exclude-standard | grep -E '\.(cpp|hpp|c|cc|cxx|hxx|ixx|h|m|mm|hh)$$' | xargs clang-format -i -style=Microsoft
lint:
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./DataCompiler/
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./Installer/
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./Source/
@swift-format lint --configuration ./.clang-format-swift.json --recursive ./UserPhraseEditor/
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swift-format lint --configuration ./.clang-format-swift.json --parallel
batchfix:
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swiftlint --fix --autocorrect
.PHONY: permission-check install-debug install-release

File diff suppressed because it is too large


@ -4,44 +4,47 @@
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace {
namespace
{
namespace py = ::pybind11;
std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
// The `argv` pointers here become invalid when this function returns, but
// benchmark holds the pointer to `argv[0]`. We create a static copy of it
// so it persists, and replace the pointer below.
static std::string executable_name(argv[0]);
std::vector<char*> ptrs;
ptrs.reserve(argv.size());
for (auto& arg : argv) {
ptrs.push_back(const_cast<char*>(arg.c_str()));
}
ptrs[0] = const_cast<char*>(executable_name.c_str());
int argc = static_cast<int>(argv.size());
benchmark::Initialize(&argc, ptrs.data());
std::vector<std::string> remaining_argv;
remaining_argv.reserve(argc);
for (int i = 0; i < argc; ++i) {
remaining_argv.emplace_back(ptrs[i]);
}
return remaining_argv;
std::vector<std::string> Initialize(const std::vector<std::string> &argv)
{
// The `argv` pointers here become invalid when this function returns, but
// benchmark holds the pointer to `argv[0]`. We create a static copy of it
// so it persists, and replace the pointer below.
static std::string executable_name(argv[0]);
std::vector<char *> ptrs;
ptrs.reserve(argv.size());
for (auto &arg : argv)
{
ptrs.push_back(const_cast<char *>(arg.c_str()));
}
ptrs[0] = const_cast<char *>(executable_name.c_str());
int argc = static_cast<int>(argv.size());
benchmark::Initialize(&argc, ptrs.data());
std::vector<std::string> remaining_argv;
remaining_argv.reserve(argc);
for (int i = 0; i < argc; ++i)
{
remaining_argv.emplace_back(ptrs[i]);
}
return remaining_argv;
}
void RegisterBenchmark(const char* name, py::function f) {
benchmark::RegisterBenchmark(name, [f](benchmark::State& state) {
f(&state);
});
void RegisterBenchmark(const char *name, py::function f)
{
benchmark::RegisterBenchmark(name, [f](benchmark::State &state) { f(&state); });
}
PYBIND11_MODULE(_benchmark, m) {
m.def("Initialize", Initialize);
m.def("RegisterBenchmark", RegisterBenchmark);
m.def("RunSpecifiedBenchmarks",
[]() { benchmark::RunSpecifiedBenchmarks(); });
PYBIND11_MODULE(_benchmark, m)
{
m.def("Initialize", Initialize);
m.def("RegisterBenchmark", RegisterBenchmark);
m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); });
py::class_<benchmark::State>(m, "State")
.def("__bool__", &benchmark::State::KeepRunning)
.def_property_readonly("keep_running", &benchmark::State::KeepRunning);
py::class_<benchmark::State>(m, "State")
.def("__bool__", &benchmark::State::KeepRunning)
.def_property_readonly("keep_running", &benchmark::State::KeepRunning);
};
} // namespace
} // namespace


@ -1,12 +1,13 @@
#include <gnuregex.h>
#include <string>
int main() {
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
return ec;
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
int main()
{
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0)
{
return ec;
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}


@ -1,14 +1,15 @@
#include <regex.h>
#include <string>
int main() {
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
return ec;
}
int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
regfree(&re);
return ret;
int main()
{
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0)
{
return ec;
}
int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
regfree(&re);
return ret;
}


@ -1,10 +1,9 @@
#include <regex>
#include <string>
int main() {
const std::string str = "test0159";
std::regex re;
re = std::regex("^[a-z]+[0-9]+$",
std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
int main()
{
const std::string str = "test0159";
std::regex re;
re = std::regex("^[a-z]+[0-9]+$", std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
}


@ -1,6 +1,7 @@
#include <chrono>
int main() {
int main()
{
typedef std::chrono::steady_clock Clock;
Clock::time_point tp = Clock::now();
((void)tp);


@ -1,4 +1,6 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"
int main() {}
int main()
{
}


@ -1,13 +1,15 @@
#include "benchmark/benchmark.h"
void BM_StringCreation(benchmark::State& state) {
void BM_StringCreation(benchmark::State &state)
{
while (state.KeepRunning())
std::string empty_string;
}
BENCHMARK(BM_StringCreation);
void BM_StringCopy(benchmark::State& state) {
void BM_StringCopy(benchmark::State &state)
{
std::string x = "hello";
while (state.KeepRunning())
std::string copy(x);


@ -3,8 +3,10 @@
#include "internal_macros.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
@ -14,20 +16,18 @@ namespace internal {
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
template <typename T, size_t N> char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
} // end namespace internal
} // end namespace benchmark
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_ARRAYSIZE_H_
#endif // BENCHMARK_ARRAYSIZE_H_


@ -106,40 +106,34 @@ DEFINE_bool(benchmark_counters_tabular, false);
// The level of verbose logging to output
DEFINE_int32(v, 0);
namespace benchmark {
namespace benchmark
{
namespace internal {
namespace internal
{
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}
void UseCharPointer(char const volatile *)
{
}
} // namespace internal
} // namespace internal
State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
int thread_i, int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager)
: total_iterations_(0),
batch_leftover_(0),
max_iterations(max_iters),
started_(false),
finished_(false),
error_occurred_(false),
range_(ranges),
complexity_n_(0),
counters(),
thread_index(thread_i),
threads(n_threads),
timer_(timer),
manager_(manager) {
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
State::State(IterationCount max_iters, const std::vector<int64_t> &ranges, int thread_i, int n_threads,
internal::ThreadTimer *timer, internal::ThreadManager *manager)
: total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), started_(false), finished_(false),
error_occurred_(false), range_(ranges), complexity_n_(0), counters(), thread_index(thread_i), threads(n_threads),
timer_(timer), manager_(manager)
{
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
// demonstrated since constexpr evaluation must diagnose all undefined
// behavior). However, GCC and Clang also warn about this use of offsetof,
// which must be suppressed.
// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
// demonstrated since constexpr evaluation must diagnose all undefined
// behavior). However, GCC and Clang also warn about this use of offsetof,
// which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
@ -147,11 +141,9 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <=
(cache_line_size - sizeof(error_occurred_)),
"");
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <= (cache_line_size - sizeof(error_occurred_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
@ -159,128 +151,144 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
#endif
}
void State::PauseTiming() {
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
void State::PauseTiming()
{
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
}
void State::ResumeTiming() {
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
void State::ResumeTiming()
{
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
}
void State::SkipWithError(const char* msg) {
CHECK(msg);
error_occurred_ = true;
{
MutexLock l(manager_->GetBenchmarkMutex());
if (manager_->results.has_error_ == false) {
manager_->results.error_message_ = msg;
manager_->results.has_error_ = true;
void State::SkipWithError(const char *msg)
{
CHECK(msg);
error_occurred_ = true;
{
MutexLock l(manager_->GetBenchmarkMutex());
if (manager_->results.has_error_ == false)
{
manager_->results.error_message_ = msg;
manager_->results.has_error_ = true;
}
}
}
total_iterations_ = 0;
if (timer_->running()) timer_->StopTimer();
total_iterations_ = 0;
if (timer_->running())
timer_->StopTimer();
}
void State::SetIterationTime(double seconds) {
timer_->SetIterationTime(seconds);
void State::SetIterationTime(double seconds)
{
timer_->SetIterationTime(seconds);
}
void State::SetLabel(const char* label) {
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
void State::SetLabel(const char *label)
{
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
}
void State::StartKeepRunning() {
CHECK(!started_ && !finished_);
started_ = true;
total_iterations_ = error_occurred_ ? 0 : max_iterations;
manager_->StartStopBarrier();
if (!error_occurred_) ResumeTiming();
void State::StartKeepRunning()
{
CHECK(!started_ && !finished_);
started_ = true;
total_iterations_ = error_occurred_ ? 0 : max_iterations;
manager_->StartStopBarrier();
if (!error_occurred_)
ResumeTiming();
}
void State::FinishKeepRunning() {
CHECK(started_ && (!finished_ || error_occurred_));
if (!error_occurred_) {
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
total_iterations_ = 0;
finished_ = true;
manager_->StartStopBarrier();
void State::FinishKeepRunning()
{
CHECK(started_ && (!finished_ || error_occurred_));
if (!error_occurred_)
{
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
total_iterations_ = 0;
finished_ = true;
manager_->StartStopBarrier();
}
namespace internal {
namespace {
namespace internal
{
namespace
{
void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
// Note the file_reporter can be null.
CHECK(display_reporter != nullptr);
void RunBenchmarks(const std::vector<BenchmarkInstance> &benchmarks, BenchmarkReporter *display_reporter,
BenchmarkReporter *file_reporter)
{
// Note the file_reporter can be null.
CHECK(display_reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
for (const BenchmarkInstance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.str().size());
might_have_aggregates |= benchmark.repetitions > 1;
// Determine the width of the name field using a minimum width of 10.
bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
for (const BenchmarkInstance &benchmark : benchmarks)
{
name_field_width = std::max<size_t>(name_field_width, benchmark.name.str().size());
might_have_aggregates |= benchmark.repetitions > 1;
for (const auto& Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
if (might_have_aggregates) name_field_width += 1 + stat_field_width;
for (const auto &Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
if (might_have_aggregates)
name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
context.name_field_width = name_field_width;
// Print header here
BenchmarkReporter::Context context;
context.name_field_width = name_field_width;
// Keep track of running times of all instances of current benchmark
std::vector<BenchmarkReporter::Run> complexity_reports;
// Keep track of running times of all instances of current benchmark
std::vector<BenchmarkReporter::Run> complexity_reports;
// We flush streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
auto flushStreams = [](BenchmarkReporter* reporter) {
if (!reporter) return;
std::flush(reporter->GetOutputStream());
std::flush(reporter->GetErrorStream());
};
// We flush streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
auto flushStreams = [](BenchmarkReporter *reporter) {
if (!reporter)
return;
std::flush(reporter->GetOutputStream());
std::flush(reporter->GetErrorStream());
};
if (display_reporter->ReportContext(context) &&
(!file_reporter || file_reporter->ReportContext(context))) {
if (display_reporter->ReportContext(context) && (!file_reporter || file_reporter->ReportContext(context)))
{
flushStreams(display_reporter);
flushStreams(file_reporter);
for (const auto &benchmark : benchmarks)
{
RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
auto report = [&run_results](BenchmarkReporter *reporter, bool report_aggregates_only) {
assert(reporter);
// If there are no aggregates, do output non-aggregates.
report_aggregates_only &= !run_results.aggregates_only.empty();
if (!report_aggregates_only)
reporter->ReportRuns(run_results.non_aggregates);
if (!run_results.aggregates_only.empty())
reporter->ReportRuns(run_results.aggregates_only);
};
report(display_reporter, run_results.display_report_aggregates_only);
if (file_reporter)
report(file_reporter, run_results.file_report_aggregates_only);
flushStreams(display_reporter);
flushStreams(file_reporter);
}
}
display_reporter->Finalize();
if (file_reporter)
file_reporter->Finalize();
flushStreams(display_reporter);
flushStreams(file_reporter);
for (const auto& benchmark : benchmarks) {
RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
auto report = [&run_results](BenchmarkReporter* reporter,
bool report_aggregates_only) {
assert(reporter);
// If there are no aggregates, do output non-aggregates.
report_aggregates_only &= !run_results.aggregates_only.empty();
if (!report_aggregates_only)
reporter->ReportRuns(run_results.non_aggregates);
if (!run_results.aggregates_only.empty())
reporter->ReportRuns(run_results.aggregates_only);
};
report(display_reporter, run_results.display_report_aggregates_only);
if (file_reporter)
report(file_reporter, run_results.file_report_aggregates_only);
flushStreams(display_reporter);
flushStreams(file_reporter);
}
}
display_reporter->Finalize();
if (file_reporter) file_reporter->Finalize();
flushStreams(display_reporter);
flushStreams(file_reporter);
}
// Disable deprecated warnings temporarily because we need to reference
@ -290,210 +298,241 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::unique_ptr<BenchmarkReporter> CreateReporter(
std::string const& name, ConsoleReporter::OutputOptions output_opts) {
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console") {
return PtrType(new ConsoleReporter(output_opts));
} else if (name == "json") {
return PtrType(new JSONReporter);
} else if (name == "csv") {
return PtrType(new CSVReporter);
} else {
std::cerr << "Unexpected format: '" << name << "'\n";
std::exit(1);
}
std::unique_ptr<BenchmarkReporter> CreateReporter(std::string const &name, ConsoleReporter::OutputOptions output_opts)
{
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console")
{
return PtrType(new ConsoleReporter(output_opts));
}
else if (name == "json")
{
return PtrType(new JSONReporter);
}
else if (name == "csv")
{
return PtrType(new CSVReporter);
}
else
{
std::cerr << "Unexpected format: '" << name << "'\n";
std::exit(1);
}
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
} // end namespace
} // end namespace
bool IsZero(double n) {
return std::abs(n) < std::numeric_limits<double>::epsilon();
bool IsZero(double n)
{
return std::abs(n) < std::numeric_limits<double>::epsilon();
}
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
int output_opts = ConsoleReporter::OO_Defaults;
auto is_benchmark_color = [force_no_color]() -> bool {
if (force_no_color) {
return false;
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color)
{
int output_opts = ConsoleReporter::OO_Defaults;
auto is_benchmark_color = [force_no_color]() -> bool {
if (force_no_color)
{
return false;
}
if (FLAGS_benchmark_color == "auto")
{
return IsColorTerminal();
}
return IsTruthyFlagValue(FLAGS_benchmark_color);
};
if (is_benchmark_color())
{
output_opts |= ConsoleReporter::OO_Color;
}
if (FLAGS_benchmark_color == "auto") {
return IsColorTerminal();
else
{
output_opts &= ~ConsoleReporter::OO_Color;
}
return IsTruthyFlagValue(FLAGS_benchmark_color);
};
if (is_benchmark_color()) {
output_opts |= ConsoleReporter::OO_Color;
} else {
output_opts &= ~ConsoleReporter::OO_Color;
}
if (FLAGS_benchmark_counters_tabular) {
output_opts |= ConsoleReporter::OO_Tabular;
} else {
output_opts &= ~ConsoleReporter::OO_Tabular;
}
return static_cast<ConsoleReporter::OutputOptions>(output_opts);
if (FLAGS_benchmark_counters_tabular)
{
output_opts |= ConsoleReporter::OO_Tabular;
}
else
{
output_opts &= ~ConsoleReporter::OO_Tabular;
}
return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}
} // end namespace internal
} // end namespace internal
size_t RunSpecifiedBenchmarks() {
return RunSpecifiedBenchmarks(nullptr, nullptr);
size_t RunSpecifiedBenchmarks()
{
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
return RunSpecifiedBenchmarks(display_reporter, nullptr);
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter)
{
return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
BenchmarkReporter* file_reporter) {
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
spec = "."; // Regexp that matches all benchmarks
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter, BenchmarkReporter *file_reporter)
{
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
spec = "."; // Regexp that matches all benchmarks
// Setup the reporters
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter) {
default_display_reporter = internal::CreateReporter(
FLAGS_benchmark_format, internal::GetOutputOptions());
display_reporter = default_display_reporter.get();
}
auto& Out = display_reporter->GetOutputStream();
auto& Err = display_reporter->GetErrorStream();
std::string const& fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter) {
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
std::exit(1);
}
if (!fname.empty()) {
output_file.open(fname);
if (!output_file.is_open()) {
Err << "invalid file name: '" << fname << std::endl;
std::exit(1);
// Setup the reporters
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter)
{
default_display_reporter = internal::CreateReporter(FLAGS_benchmark_format, internal::GetOutputOptions());
display_reporter = default_display_reporter.get();
}
if (!file_reporter) {
default_file_reporter = internal::CreateReporter(
FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
auto &Out = display_reporter->GetOutputStream();
auto &Err = display_reporter->GetErrorStream();
std::string const &fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter)
{
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
std::exit(1);
}
if (!fname.empty())
{
output_file.open(fname);
if (!output_file.is_open())
{
Err << "invalid file name: '" << fname << std::endl;
std::exit(1);
}
if (!file_reporter)
{
default_file_reporter = internal::CreateReporter(FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
}
file_reporter->SetOutputStream(&output_file);
file_reporter->SetErrorStream(&output_file);
}
file_reporter->SetOutputStream(&output_file);
file_reporter->SetErrorStream(&output_file);
}
std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;
std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err))
return 0;
if (benchmarks.empty()) {
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
if (benchmarks.empty())
{
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
return 0;
}
if (FLAGS_benchmark_list_tests)
{
for (auto const &benchmark : benchmarks)
Out << benchmark.name.str() << "\n";
}
else
{
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
void RegisterMemoryManager(MemoryManager *manager)
{
internal::memory_manager = manager;
}
namespace internal
{
void PrintUsageAndExit()
{
fprintf(stdout, "benchmark"
" [--benchmark_list_tests={true|false}]\n"
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
" [--benchmark_report_aggregates_only={true|false}]\n"
" [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
" [--benchmark_color={auto|true|false}]\n"
" [--benchmark_counters_tabular={true|false}]\n"
" [--v=<verbosity>]\n");
exit(0);
}
void ParseCommandLineFlags(int *argc, char **argv)
{
using namespace benchmark;
BenchmarkReporter::Context::executable_name = (argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i)
{
if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
ParseDoubleFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", &FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", &FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
// "color_print" is the deprecated name for "benchmark_color".
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular", &FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v))
{
for (int j = i; j != *argc - 1; ++j)
argv[j] = argv[j + 1];
--(*argc);
--i;
}
else if (IsFlag(argv[i], "help"))
{
PrintUsageAndExit();
}
}
for (auto const *flag : {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv")
{
PrintUsageAndExit();
}
if (FLAGS_benchmark_color.empty())
{
PrintUsageAndExit();
}
}
int InitializeStreams()
{
static std::ios_base::Init init;
return 0;
}
if (FLAGS_benchmark_list_tests) {
for (auto const& benchmark : benchmarks)
Out << benchmark.name.str() << "\n";
} else {
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
void RegisterMemoryManager(MemoryManager* manager) {
internal::memory_manager = manager;
} // end namespace internal
void Initialize(int *argc, char **argv)
{
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
namespace internal {
void PrintUsageAndExit() {
fprintf(stdout,
"benchmark"
" [--benchmark_list_tests={true|false}]\n"
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
" [--benchmark_report_aggregates_only={true|false}]\n"
" [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
" [--benchmark_color={auto|true|false}]\n"
" [--benchmark_counters_tabular={true|false}]\n"
" [--v=<verbosity>]\n");
exit(0);
}
void ParseCommandLineFlags(int* argc, char** argv) {
using namespace benchmark;
BenchmarkReporter::Context::executable_name =
(argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i) {
if (ParseBoolFlag(argv[i], "benchmark_list_tests",
&FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
ParseDoubleFlag(argv[i], "benchmark_min_time",
&FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions",
&FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
&FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
&FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format",
&FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
// "color_print" is the deprecated name for "benchmark_color".
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular",
&FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
--(*argc);
--i;
} else if (IsFlag(argv[i], "help")) {
PrintUsageAndExit();
bool ReportUnrecognizedArguments(int argc, char **argv)
{
for (int i = 1; i < argc; ++i)
{
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
}
}
for (auto const* flag :
{&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv") {
PrintUsageAndExit();
}
if (FLAGS_benchmark_color.empty()) {
PrintUsageAndExit();
}
return argc > 1;
}
int InitializeStreams() {
static std::ios_base::Init init;
return 0;
}
} // end namespace internal
void Initialize(int* argc, char** argv) {
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
bool ReportUnrecognizedArguments(int argc, char** argv) {
for (int i = 1; i < argc; ++i) {
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
argv[i]);
}
return argc > 1;
}
} // end namespace benchmark
} // end namespace benchmark


@ -1,15 +1,17 @@
#include "benchmark_api_internal.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
State BenchmarkInstance::Run(IterationCount iters, int thread_id,
internal::ThreadTimer* timer,
internal::ThreadManager* manager) const {
State st(iters, arg, thread_id, threads, timer, manager);
benchmark->Run(st);
return st;
State BenchmarkInstance::Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
internal::ThreadManager *manager) const
{
State st(iters, arg, thread_id, threads, timer, manager);
benchmark->Run(st);
return st;
}
} // internal
} // benchmark
} // namespace internal
} // namespace benchmark


@ -11,43 +11,44 @@
#include <string>
#include <vector>
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
// Information kept per benchmark we may want to run
struct BenchmarkInstance {
BenchmarkName name;
Benchmark* benchmark;
AggregationReportMode aggregation_report_mode;
std::vector<int64_t> arg;
TimeUnit time_unit;
int range_multiplier;
bool measure_process_cpu_time;
bool use_real_time;
bool use_manual_time;
BigO complexity;
BigOFunc* complexity_lambda;
UserCounters counters;
const std::vector<Statistics>* statistics;
bool last_benchmark_instance;
int repetitions;
double min_time;
IterationCount iterations;
int threads; // Number of concurrent threads to use
struct BenchmarkInstance
{
BenchmarkName name;
Benchmark *benchmark;
AggregationReportMode aggregation_report_mode;
std::vector<int64_t> arg;
TimeUnit time_unit;
int range_multiplier;
bool measure_process_cpu_time;
bool use_real_time;
bool use_manual_time;
BigO complexity;
BigOFunc *complexity_lambda;
UserCounters counters;
const std::vector<Statistics> *statistics;
bool last_benchmark_instance;
int repetitions;
double min_time;
IterationCount iterations;
int threads; // Number of concurrent threads to use
State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer,
internal::ThreadManager* manager) const;
State Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
internal::ThreadManager *manager) const;
};
bool FindBenchmarksInternal(const std::string& re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
bool IsZero(double n);
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
} // end namespace internal
} // end namespace benchmark
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_API_INTERNAL_H
#endif // BENCHMARK_API_INTERNAL_H


@ -14,45 +14,53 @@
#include <benchmark/benchmark.h>
namespace benchmark {
namespace benchmark
{
namespace {
namespace
{
// Compute the total size of a pack of std::strings
size_t size_impl() { return 0; }
size_t size_impl()
{
return 0;
}
template <typename Head, typename... Tail>
size_t size_impl(const Head& head, const Tail&... tail) {
return head.size() + size_impl(tail...);
template <typename Head, typename... Tail> size_t size_impl(const Head &head, const Tail &...tail)
{
return head.size() + size_impl(tail...);
}
// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string&, char) {}
void join_impl(std::string &, char)
{
}
template <typename Head, typename... Tail>
void join_impl(std::string& s, const char delimiter, const Head& head,
const Tail&... tail) {
if (!s.empty() && !head.empty()) {
s += delimiter;
}
void join_impl(std::string &s, const char delimiter, const Head &head, const Tail &...tail)
{
if (!s.empty() && !head.empty())
{
s += delimiter;
}
s += head;
s += head;
join_impl(s, delimiter, tail...);
join_impl(s, delimiter, tail...);
}
template <typename... Ts>
std::string join(char delimiter, const Ts&... ts) {
std::string s;
s.reserve(sizeof...(Ts) + size_impl(ts...));
join_impl(s, delimiter, ts...);
return s;
template <typename... Ts> std::string join(char delimiter, const Ts &...ts)
{
std::string s;
s.reserve(sizeof...(Ts) + size_impl(ts...));
join_impl(s, delimiter, ts...);
return s;
}
} // namespace
} // namespace
std::string BenchmarkName::str() const {
return join('/', function_name, args, min_time, iterations, repetitions,
time_type, threads);
std::string BenchmarkName::str() const
{
return join('/', function_name, args, min_time, iterations, repetitions, time_type, threads);
}
} // namespace benchmark
} // namespace benchmark


@ -52,17 +52,20 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace benchmark
{
namespace {
namespace
{
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8;
// The size of a benchmark family is the number of inputs to repeat
// the benchmark on. If this is "large" then warn the user during configuration.
static const size_t kMaxFamilySize = 100;
} // end namespace
} // end namespace
namespace internal {
namespace internal
{
//=============================================================================//
// BenchmarkFamilies
@ -70,437 +73,492 @@ namespace internal {
// Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies {
public:
static BenchmarkFamilies* GetInstance();
class BenchmarkFamilies
{
public:
static BenchmarkFamilies *GetInstance();
// Registers a benchmark family and returns the index assigned to it.
size_t AddBenchmark(std::unique_ptr<Benchmark> family);
// Registers a benchmark family and returns the index assigned to it.
size_t AddBenchmark(std::unique_ptr<Benchmark> family);
// Clear all registered benchmark families.
void ClearBenchmarks();
// Clear all registered benchmark families.
void ClearBenchmarks();
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err);
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
private:
BenchmarkFamilies() {}
private:
BenchmarkFamilies()
{
}
std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_;
std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_;
};
BenchmarkFamilies* BenchmarkFamilies::GetInstance() {
static BenchmarkFamilies instance;
return &instance;
BenchmarkFamilies *BenchmarkFamilies::GetInstance()
{
static BenchmarkFamilies instance;
return &instance;
}
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family) {
MutexLock l(mutex_);
size_t index = families_.size();
families_.push_back(std::move(family));
return index;
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family)
{
MutexLock l(mutex_);
size_t index = families_.size();
families_.push_back(std::move(family));
return index;
}
void BenchmarkFamilies::ClearBenchmarks() {
MutexLock l(mutex_);
families_.clear();
families_.shrink_to_fit();
void BenchmarkFamilies::ClearBenchmarks()
{
MutexLock l(mutex_);
families_.clear();
families_.shrink_to_fit();
}
bool BenchmarkFamilies::FindBenchmarks(
std::string spec, std::vector<BenchmarkInstance>* benchmarks,
std::ostream* ErrStream) {
CHECK(ErrStream);
auto& Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
if (spec[0] == '-') {
spec.replace(0, 1, "");
isNegativeFilter = true;
}
if (!re.Init(spec, &error_msg)) {
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark>& family : families_) {
// Family was deleted or benchmark doesn't match
if (!family) continue;
if (family->ArgsCnt() == -1) {
family->Args({});
bool BenchmarkFamilies::FindBenchmarks(std::string spec, std::vector<BenchmarkInstance> *benchmarks,
std::ostream *ErrStream)
{
CHECK(ErrStream);
auto &Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
if (spec[0] == '-')
{
spec.replace(0, 1, "");
isNegativeFilter = true;
}
const std::vector<int>* thread_counts =
(family->thread_counts_.empty()
? &one_thread
: &static_cast<const std::vector<int>&>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run at least 'family_size' different inputs.
// If 'family_size' is very large warn the user.
if (family_size > kMaxFamilySize) {
Err << "The number of inputs is very large. " << family->name_
<< " will be repeated at least " << family_size << " times.\n";
if (!re.Init(spec, &error_msg))
{
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
// reserve in the special case the regex ".", since we know the final
// family size.
if (spec == ".") benchmarks->reserve(family_size);
for (auto const& args : family->args_) {
for (int num_threads : *thread_counts) {
BenchmarkInstance instance;
instance.name.function_name = family->name_;
instance.benchmark = family.get();
instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
instance.measure_process_cpu_time = family->measure_process_cpu_time_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.statistics = &family->statistics_;
instance.threads = num_threads;
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
// Add arguments to instance name
size_t arg_i = 0;
for (auto const& arg : args) {
if (!instance.name.args.empty()) {
instance.name.args += '/';
}
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark> &family : families_)
{
// Family was deleted or benchmark doesn't match
if (!family)
continue;
if (arg_i < family->arg_names_.size()) {
const auto& arg_name = family->arg_names_[arg_i];
if (!arg_name.empty()) {
instance.name.args += StrFormat("%s:", arg_name.c_str());
if (family->ArgsCnt() == -1)
{
family->Args({});
}
const std::vector<int> *thread_counts =
(family->thread_counts_.empty() ? &one_thread
: &static_cast<const std::vector<int> &>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run at least 'family_size' different inputs.
// If 'family_size' is very large warn the user.
if (family_size > kMaxFamilySize)
{
Err << "The number of inputs is very large. " << family->name_ << " will be repeated at least "
<< family_size << " times.\n";
}
// reserve in the special case the regex ".", since we know the final
// family size.
if (spec == ".")
benchmarks->reserve(family_size);
for (auto const &args : family->args_)
{
for (int num_threads : *thread_counts)
{
BenchmarkInstance instance;
instance.name.function_name = family->name_;
instance.benchmark = family.get();
instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
instance.measure_process_cpu_time = family->measure_process_cpu_time_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.statistics = &family->statistics_;
instance.threads = num_threads;
// Add arguments to instance name
size_t arg_i = 0;
for (auto const &arg : args)
{
if (!instance.name.args.empty())
{
instance.name.args += '/';
}
if (arg_i < family->arg_names_.size())
{
const auto &arg_name = family->arg_names_[arg_i];
if (!arg_name.empty())
{
instance.name.args += StrFormat("%s:", arg_name.c_str());
}
}
instance.name.args += StrFormat("%" PRId64, arg);
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name.min_time = StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0)
{
instance.name.iterations =
StrFormat("iterations:%lu", static_cast<unsigned long>(family->iterations_));
}
if (family->repetitions_ != 0)
instance.name.repetitions = StrFormat("repeats:%d", family->repetitions_);
if (family->measure_process_cpu_time_)
{
instance.name.time_type = "process_time";
}
if (family->use_manual_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "manual_time";
}
else if (family->use_real_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty())
{
instance.name.threads = StrFormat("threads:%d", instance.threads);
}
const auto full_name = instance.name.str();
if ((re.Match(full_name) && !isNegativeFilter) || (!re.Match(full_name) && isNegativeFilter))
{
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
}
}
instance.name.args += StrFormat("%" PRId64, arg);
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name.min_time =
StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0) {
instance.name.iterations =
StrFormat("iterations:%lu",
static_cast<unsigned long>(family->iterations_));
}
if (family->repetitions_ != 0)
instance.name.repetitions =
StrFormat("repeats:%d", family->repetitions_);
if (family->measure_process_cpu_time_) {
instance.name.time_type = "process_time";
}
if (family->use_manual_time_) {
if (!instance.name.time_type.empty()) {
instance.name.time_type += '/';
}
instance.name.time_type += "manual_time";
} else if (family->use_real_time_) {
if (!instance.name.time_type.empty()) {
instance.name.time_type += '/';
}
instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty()) {
instance.name.threads = StrFormat("threads:%d", instance.threads);
}
const auto full_name = instance.name.str();
if ((re.Match(full_name) && !isNegativeFilter) ||
(!re.Match(full_name) && isNegativeFilter)) {
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
}
}
}
return true;
return true;
}
Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
std::unique_ptr<Benchmark> bench_ptr(bench);
BenchmarkFamilies* families = BenchmarkFamilies::GetInstance();
families->AddBenchmark(std::move(bench_ptr));
return bench;
Benchmark *RegisterBenchmarkInternal(Benchmark *bench)
{
std::unique_ptr<Benchmark> bench_ptr(bench);
BenchmarkFamilies *families = BenchmarkFamilies::GetInstance();
families->AddBenchmark(std::move(bench_ptr));
return bench;
}
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string& re,
std::vector<BenchmarkInstance>* benchmarks,
std::ostream* Err) {
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err)
{
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
//=============================================================================//
// Benchmark
//=============================================================================//
Benchmark::Benchmark(const char* name)
: name_(name),
aggregation_report_mode_(ARM_Unspecified),
time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier),
min_time_(0),
iterations_(0),
repetitions_(0),
measure_process_cpu_time_(false),
use_real_time_(false),
use_manual_time_(false),
complexity_(oNone),
complexity_lambda_(nullptr) {
ComputeStatistics("mean", StatisticsMean);
ComputeStatistics("median", StatisticsMedian);
ComputeStatistics("stddev", StatisticsStdDev);
Benchmark::Benchmark(const char *name)
: name_(name), aggregation_report_mode_(ARM_Unspecified), time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier), min_time_(0), iterations_(0), repetitions_(0),
measure_process_cpu_time_(false), use_real_time_(false), use_manual_time_(false), complexity_(oNone),
complexity_lambda_(nullptr)
{
ComputeStatistics("mean", StatisticsMean);
ComputeStatistics("median", StatisticsMedian);
ComputeStatistics("stddev", StatisticsStdDev);
}
Benchmark::~Benchmark() {}
Benchmark* Benchmark::Arg(int64_t x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
Benchmark::~Benchmark()
{
}
Benchmark* Benchmark::Unit(TimeUnit unit) {
time_unit_ = unit;
return this;
Benchmark *Benchmark::Arg(int64_t x)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
}
Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int64_t> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int64_t i : arglist) {
args_.push_back({i});
}
return this;
Benchmark *Benchmark::Unit(TimeUnit unit)
{
time_unit_ = unit;
return this;
}
Benchmark *Benchmark::Range(int64_t start, int64_t limit)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int64_t> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int64_t i : arglist)
{
args_.push_back({i});
}
return this;
}
Benchmark *Benchmark::Ranges(const std::vector<std::pair<int64_t, int64_t>> &ranges)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++)
{
AddRange(&arglists[i], ranges[i].first, ranges[i].second, range_multiplier_);
total *= arglists[i].size();
}
std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t i = 0; i < total; i++)
{
std::vector<int64_t> tmp;
tmp.reserve(arglists.size());
for (std::size_t j = 0; j < arglists.size(); j++)
{
tmp.push_back(arglists[j].at(ctr[j]));
}
args_.push_back(std::move(tmp));
for (std::size_t j = 0; j < arglists.size(); j++)
{
if (ctr[j] + 1 < arglists[j].size())
{
++ctr[j];
break;
}
ctr[j] = 0;
}
}
return this;
}
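// Illustrative sketch of the argument sets these builders register (BM_copy is
// a hypothetical benchmark function; kRangeMultiplier, the default range
// multiplier, is 8):
//
//   BENCHMARK(BM_copy)->Arg(64);                    // args_: {64}
//   BENCHMARK(BM_copy)->Range(8, 512);              // args_: {8}, {64}, {512}
//   BENCHMARK(BM_copy)->Ranges({{8, 64}, {1, 2}});  // cartesian product:
//                                                   // {8,1} {8,2} {64,1} {64,2}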
Benchmark *Benchmark::ArgName(const std::string &name)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
arg_names_ = {name};
return this;
}
Benchmark *Benchmark::ArgNames(const std::vector<std::string> &names)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
arg_names_ = names;
return this;
}
Benchmark *Benchmark::DenseRange(int64_t start, int64_t limit, int step)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step)
{
args_.push_back({arg});
}
return this;
}
Benchmark *Benchmark::Args(const std::vector<int64_t> &args)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
}
Benchmark *Benchmark::Apply(void (*custom_arguments)(Benchmark *benchmark))
{
custom_arguments(this);
return this;
}
Benchmark *Benchmark::RangeMultiplier(int multiplier)
{
CHECK(multiplier > 1);
range_multiplier_ = multiplier;
return this;
}
Benchmark *Benchmark::MinTime(double t)
{
CHECK(t > 0.0);
CHECK(iterations_ == 0);
min_time_ = t;
return this;
}
Benchmark *Benchmark::Iterations(IterationCount n)
{
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
return this;
}
Benchmark *Benchmark::Repetitions(int n)
{
CHECK(n > 0);
repetitions_ = n;
return this;
}
Benchmark *Benchmark::ReportAggregatesOnly(bool value)
{
aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
return this;
}
Benchmark *Benchmark::DisplayAggregatesOnly(bool value)
{
// If we were called, the report mode is no longer 'unspecified', in any case.
aggregation_report_mode_ = static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_Default);
if (value)
{
aggregation_report_mode_ =
static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
}
else
{
aggregation_report_mode_ =
static_cast<AggregationReportMode>(aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
}
return this;
}
Benchmark *Benchmark::MeasureProcessCPUTime()
{
// Can be used together with UseRealTime() / UseManualTime().
measure_process_cpu_time_ = true;
return this;
}
Benchmark *Benchmark::UseRealTime()
{
CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
return this;
}
Benchmark *Benchmark::UseManualTime()
{
CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_manual_time_ = true;
return this;
}
Benchmark *Benchmark::Complexity(BigO complexity)
{
complexity_ = complexity;
return this;
}
Benchmark *Benchmark::Complexity(BigOFunc *complexity)
{
complexity_lambda_ = complexity;
complexity_ = oLambda;
return this;
}
Benchmark *Benchmark::ComputeStatistics(std::string name, StatisticsFunc *statistics)
{
statistics_.emplace_back(name, statistics);
return this;
}
Benchmark *Benchmark::Threads(int t)
{
CHECK_GT(t, 0);
thread_counts_.push_back(t);
return this;
}
Benchmark *Benchmark::ThreadRange(int min_threads, int max_threads)
{
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
AddRange(&thread_counts_, min_threads, max_threads, 2);
return this;
}
Benchmark *Benchmark::DenseThreadRange(int min_threads, int max_threads, int stride)
{
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
CHECK_GE(stride, 1);
for (auto i = min_threads; i < max_threads; i += stride)
{
thread_counts_.push_back(i);
}
thread_counts_.push_back(max_threads);
return this;
}
Benchmark *Benchmark::ThreadPerCpu()
{
thread_counts_.push_back(CPUInfo::Get().num_cpus);
return this;
}
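// Illustrative sketch of the thread counts these builders collect (BM_queue is
// a hypothetical benchmark function):
//
//   BENCHMARK(BM_queue)->Threads(4);                 // thread_counts_: 4
//   BENCHMARK(BM_queue)->ThreadRange(1, 8);          // 1, 2, 4, 8
//   BENCHMARK(BM_queue)->DenseThreadRange(1, 4, 1);  // 1, 2, 3, 4
//   BENCHMARK(BM_queue)->ThreadPerCpu();             // CPUInfo::Get().num_cpus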
void Benchmark::SetName(const char *name)
{
name_ = name;
}
int Benchmark::ArgsCnt() const
{
if (args_.empty())
{
if (arg_names_.empty())
return -1;
return static_cast<int>(arg_names_.size());
}
return static_cast<int>(args_.front().size());
}
//=============================================================================//
// FunctionBenchmark
//=============================================================================//
void FunctionBenchmark::Run(State &st)
{
func_(st);
}
} // end namespace internal
void ClearRegisteredBenchmarks()
{
internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}
} // end namespace benchmark

View File

@ -5,103 +5,112 @@
#include "check.h"
namespace benchmark
{
namespace internal
{
// Append the powers of 'mult' in the closed interval [lo, hi].
// Returns iterator to the start of the inserted range.
template <typename T> typename std::vector<T>::iterator AddPowers(std::vector<T> *dst, T lo, T hi, int mult)
{
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
const size_t start_offset = dst->size();
static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult"
for (T i = 1; i <= hi; i *= mult)
{
if (i >= lo)
{
dst->push_back(i);
}
// Break the loop here since multiplying by
// 'mult' would move outside of the range of T
if (i > kmax / mult)
break;
}
return dst->begin() + start_offset;
}
template <typename T> void AddNegatedPowers(std::vector<T> *dst, T lo, T hi, int mult)
{
// We negate lo and hi so we require that they cannot be equal to 'min'.
CHECK_GT(lo, std::numeric_limits<T>::min());
CHECK_GT(hi, std::numeric_limits<T>::min());
CHECK_GE(hi, lo);
CHECK_LE(hi, 0);
// Add positive powers, then negate and reverse.
// Casts necessary since small integers get promoted
// to 'int' when negating.
const auto lo_complement = static_cast<T>(-lo);
const auto hi_complement = static_cast<T>(-hi);
const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
std::for_each(it, dst->end(), [](T &t) { t *= -1; });
std::reverse(it, dst->end());
}
template <typename T> void AddRange(std::vector<T> *dst, T lo, T hi, int mult)
{
static_assert(std::is_integral<T>::value && std::is_signed<T>::value, "Args type must be a signed integer");
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
// Add "lo"
dst->push_back(lo);
// Add "lo"
dst->push_back(lo);
// Handle lo == hi as a special case, so we then know
// lo < hi and so it is safe to add 1 to lo and subtract 1
// from hi without falling outside of the range of T.
if (lo == hi)
return;
// Ensure that lo_inner <= hi_inner below.
if (lo + 1 == hi)
{
dst->push_back(hi);
return;
}
// Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
const auto lo_inner = static_cast<T>(lo + 1);
const auto hi_inner = static_cast<T>(hi - 1);
// Insert negative values
if (lo_inner < 0)
{
AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
}
// Treat 0 as a special case (see discussion on #762).
if (lo <= 0 && hi >= 0)
{
dst->push_back(0);
}
// Insert positive values
if (hi_inner > 0)
{
AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
}
// Add "hi" (if different from last value).
if (hi != dst->back()) {
dst->push_back(hi);
}
// Add "hi" (if different from last value).
if (hi != dst->back())
{
dst->push_back(hi);
}
}
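// Worked example of the behaviour above (values derived from the code, shown
// only as documentation):
//
//   std::vector<int64_t> v;
//   AddRange(&v, int64_t{1}, int64_t{1000}, 10);  // v == {1, 10, 100, 1000}
//   v.clear();
//   AddRange(&v, int64_t{-8}, int64_t{8}, 2);     // v == {-8, -4, -2, -1, 0, 1, 2, 4, 8}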
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_REGISTER_H

View File

@ -51,312 +51,324 @@
#include "thread_manager.h"
#include "thread_timer.h"
namespace benchmark
{
namespace internal
{
MemoryManager *memory_manager = nullptr;
namespace
{
static constexpr IterationCount kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport(const benchmark::internal::BenchmarkInstance &b,
const internal::ThreadManager::Result &results, IterationCount memory_iterations,
const MemoryManager::Result &memory_result, double seconds,
int64_t repetition_index)
{
// Create report about this benchmark run.
BenchmarkReporter::Run report;
report.run_name = b.name;
report.error_occurred = results.has_error_;
report.error_message = results.error_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
report.time_unit = b.time_unit;
report.threads = b.threads;
report.repetition_index = repetition_index;
report.repetitions = b.repetitions;
if (!report.error_occurred)
{
if (b.use_manual_time)
{
report.real_accumulated_time = results.manual_time_used;
}
else
{
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
report.statistics = b.statistics;
report.counters = results.counters;
if (memory_iterations > 0)
{
report.has_memory_result = true;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) / memory_iterations : 0;
report.max_bytes_used = memory_result.max_bytes_used;
}
internal::Finish(&report.counters, results.iterations, seconds, b.threads);
}
return report;
}
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const BenchmarkInstance *b, IterationCount iters, int thread_id, ThreadManager *manager)
{
internal::ThreadTimer timer(b->measure_process_cpu_time ? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
State st = b->Run(iters, thread_id, &timer, manager);
CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
internal::ThreadManager::Result &results = manager->results;
results.iterations += st.iterations();
results.cpu_time_used += timer.cpu_time_used();
results.real_time_used += timer.real_time_used();
results.manual_time_used += timer.manual_time_used();
results.complexity_n += st.complexity_length_n();
internal::Increment(&results.counters, st.counters);
}
manager->NotifyThreadComplete();
}
class BenchmarkRunner
{
public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance &b_,
std::vector<BenchmarkReporter::Run> *complexity_reports_)
: b(b_), complexity_reports(*complexity_reports_),
min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
repeats(b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions),
has_explicit_iteration_count(b.iterations != 0), pool(b.threads - 1),
iters(has_explicit_iteration_count ? b.iterations : 1)
{
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only || FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only = FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode != internal::ARM_Unspecified)
{
run_results.display_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
}
for (int repetition_num = 0; repetition_num < repeats; repetition_num++)
{
DoOneRepetition(repetition_num);
}
// Calculate additional statistics
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
// Maybe calculate complexity report
if ((b.complexity != oNone) && b.last_benchmark_instance)
{
auto additional_run_stats = ComputeBigO(complexity_reports);
run_results.aggregates_only.insert(run_results.aggregates_only.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
}
RunResults &&get_results()
{
return std::move(run_results);
}
private:
RunResults run_results;
const benchmark::internal::BenchmarkInstance &b;
std::vector<BenchmarkReporter::Run> &complexity_reports;
const double min_time;
const int repeats;
const bool has_explicit_iteration_count;
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
return next_iters; // round up before conversion to integer.
}
std::vector<std::thread> pool;
IterationCount iters; // preserved between repetitions!
// So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count.
struct IterationResults
{
internal::ThreadManager::Result results;
IterationCount iters;
double seconds;
};
IterationResults DoNIterations()
{
VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(b.threads));
// Run all but one thread in separate threads
for (std::size_t ti = 0; ti < pool.size(); ++ti)
{
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1), manager.get());
}
// And run one thread here directly.
// (If we were asked to run just one thread, we don't create new threads.)
// Yes, we need to do this here *after* we start the separate threads.
RunInThread(&b, iters, 0, manager.get());
// The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads();
for (std::thread &thread : pool)
thread.join();
IterationResults i;
// Acquire the measurements/counters from the manager, UNDER THE LOCK!
{
MutexLock l(manager->GetBenchmarkMutex());
i.results = manager->results;
}
// And get rid of the manager.
manager.reset();
// Adjust real/manual time stats since they were reported per thread.
i.results.real_time_used /= b.threads;
i.results.manual_time_used /= b.threads;
// If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time)
i.results.cpu_time_used /= b.threads;
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" << i.results.real_time_used << "\n";
// So for how long were we running?
i.iters = iters;
// Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used;
if (b.use_manual_time)
{
i.seconds = i.results.manual_time_used;
}
else if (b.use_real_time)
{
i.seconds = i.results.real_time_used;
}
return i;
}
IterationCount PredictNumItersNeeded(const IterationResults &i) const
{
        // See by how much the iteration count should be increased.
// Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
// If our last run was at least 10% of FLAGS_benchmark_min_time then we
// use the multiplier directly.
// Otherwise we use at most 10 times expansion.
// NOTE: When the last run was at least 10% of the min time the max
// expansion should be 14x.
bool is_significant = (i.seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0)
multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>(
std::lround(std::max(multiplier * static_cast<double>(i.iters), static_cast<double>(i.iters) + 1.0)));
        // But we do have *some* sanity limits, though.
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
return next_iters; // round up before conversion to integer.
}
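    // Worked example of the estimate above: with min_time = 0.5s, if the last
    // run did i.iters = 1000 iterations in i.seconds = 0.001s, the raw
    // multiplier is 0.5 * 1.4 / 0.001 = 700; that run covered well under 10%
    // of min_time, so growth is clamped to 10x, giving
    // next_iters = min(10 * 1000, kMaxIterations) = 10000 for the next attempt.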
bool ShouldReportIterationResults(const IterationResults &i) const
{
// Determine if this run should be reported;
// Either it has run for a sufficient amount of time
// or because an error was reported.
return i.results.has_error_ || i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
               // Note that user-provided timers are exempt from this sanity check.
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
}
void DoOneRepetition(int64_t repetition_index)
{
const bool is_the_first_repetition = repetition_index == 0;
IterationResults i;
// We *may* be gradually increasing the length (iteration count)
// of the benchmark until we decide the results are significant.
// And once we do, we report those last results and exit.
        // Please do note that if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and other repetitions
// simply use that precomputed iteration count.
for (;;)
{
i = DoNIterations();
// Do we consider the results to be significant?
// If we are doing repetitions, and the first repetition was already done,
// it has calculated the correct iteration time, so we have run that very
// iteration count just now. No need to calculate anything. Just report.
// Else, the normal rules apply.
const bool results_are_significant =
!is_the_first_repetition || has_explicit_iteration_count || ShouldReportIterationResults(i);
if (results_are_significant)
break; // Good, let's report them!
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient
// iteration count, and run the benchmark again...
iters = PredictNumItersNeeded(i);
assert(iters > i.iters && "if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run.");
}
// Oh, one last thing, we need to also produce the 'memory measurements'..
MemoryManager::Result memory_result;
IterationCount memory_iterations = 0;
if (memory_manager != nullptr)
{
// Only run a few iterations to reduce the impact of one-time
// allocations in benchmarks that are not properly managed.
memory_iterations = std::min<IterationCount>(16, iters);
memory_manager->Start();
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(1));
RunInThread(&b, memory_iterations, 0, manager.get());
manager->WaitForAllThreads();
manager.reset();
memory_manager->Stop(&memory_result);
}
        // Ok, now actually report.
BenchmarkReporter::Run report =
CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, repetition_index);
if (!report.error_occurred && b.complexity != oNone)
complexity_reports.push_back(report);
run_results.non_aggregates.push_back(report);
}
};
} // end namespace
RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
std::vector<BenchmarkReporter::Run> *complexity_reports)
{
internal::BenchmarkRunner r(b, complexity_reports);
return r.get_results();
}
} // end namespace internal
} // end namespace benchmark

View File

@ -26,26 +26,28 @@ DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only);
namespace benchmark
{
namespace internal
{
extern MemoryManager *memory_manager;
struct RunResults
{
std::vector<BenchmarkReporter::Run> non_aggregates;
std::vector<BenchmarkReporter::Run> aggregates_only;
bool display_report_aggregates_only = false;
bool file_report_aggregates_only = false;
};
RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
std::vector<BenchmarkReporter::Run> *complexity_reports);
} // namespace internal
} // end namespace benchmark
#endif // BENCHMARK_RUNNER_H_

View File

@ -8,56 +8,63 @@
#include "internal_macros.h"
#include "log.h"
namespace benchmark
{
namespace internal
{
typedef void(AbortHandlerT)();
inline AbortHandlerT *&GetAbortHandler()
{
static AbortHandlerT *handler = &std::abort;
return handler;
}
BENCHMARK_NORETURN inline void CallAbortHandler()
{
GetAbortHandler()();
std::abort(); // fallback to enforce noreturn
}
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed.
class CheckHandler
{
public:
CheckHandler(const char *check, const char *file, const char *func, int line) : log_(GetErrorLogInstance())
{
log_ << file << ":" << line << ": " << func << ": Check `" << check << "' failed. ";
}
LogType &GetLog()
{
return log_;
}
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false)
{
log_ << std::endl;
CallAbortHandler();
}
CheckHandler &operator=(const CheckHandler &) = delete;
CheckHandler(const CheckHandler &) = delete;
CheckHandler() = delete;
private:
LogType &log_;
};
} // end namespace internal
} // end namespace benchmark
// The CHECK macro returns a std::ostream object that can have extra information
// written to it.
#ifndef NDEBUG
#define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__).GetLog())
#else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif
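// Illustrative use of the macro above (debug builds only; open_socket is a
// hypothetical helper and the values are made up for the example):
//
//   int fd = open_socket();
//   CHECK(fd >= 0) << "could not open socket, fd = " << fd;
//   // On failure this logs "<file>:<line>: <func>: Check `fd >= 0' failed. ..."
//   // through CheckHandler and then invokes the abort handler.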

View File

@ -25,164 +25,174 @@
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <io.h>
#include <windows.h>
#else
#include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS
namespace benchmark
{
namespace
{
#ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode;
#else
typedef const char *PlatformColorCode;
#endif
PlatformColorCode GetPlatformColorCode(LogColor color)
{
#ifdef BENCHMARK_OS_WINDOWS
switch (color)
{
case COLOR_RED:
return FOREGROUND_RED;
case COLOR_GREEN:
return FOREGROUND_GREEN;
case COLOR_YELLOW:
return FOREGROUND_RED | FOREGROUND_GREEN;
case COLOR_BLUE:
return FOREGROUND_BLUE;
case COLOR_MAGENTA:
return FOREGROUND_BLUE | FOREGROUND_RED;
case COLOR_CYAN:
return FOREGROUND_BLUE | FOREGROUND_GREEN;
case COLOR_WHITE: // fall through to default
default:
        return 0;
    }
#else
switch (color)
{
case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_BLUE:
return "4";
case COLOR_MAGENTA:
return "5";
case COLOR_CYAN:
return "6";
case COLOR_WHITE:
return "7";
default:
return nullptr;
};
#endif
}
} // end namespace
std::string FormatString(const char *msg, va_list args)
{
    // we might need a second shot at this, so pre-emptively make a copy
va_list args_cp;
va_copy(args_cp, args);
std::size_t size = 256;
char local_buff[256];
auto ret = vsnprintf(local_buff, size, msg, args_cp);
va_end(args_cp);
    // currently there is no error handling for failure, so this is a hack.
CHECK(ret >= 0);
if (ret == 0) // handle empty expansion
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else
{
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
ret = vsnprintf(buff.get(), size, msg, args);
CHECK(ret > 0 && ((size_t)ret) < size);
return buff.get();
}
}
std::string FormatString(const char *msg, ...)
{
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
va_end(args);
return tmp;
}
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args)
{
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle, GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char *color_code = GetPlatformColorCode(color);
if (color_code)
out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal()
{
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return 0 != _isatty(_fileno(stdout));
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char *const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color", "screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", "linux", "cygwin",
};
const char *const term = getenv("TERM");
bool term_supports_color = false;
for (const char *candidate : SUPPORTED_TERM_VALUES)
{
if (term && 0 == strcmp(term, candidate))
{
term_supports_color = true;
break;
}
}
return 0 != isatty(fileno(stdout)) && term_supports_color;
#endif // BENCHMARK_OS_WINDOWS
}
} // end namespace benchmark
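// Illustrative use of the helpers above, guarding color output on terminal
// support (the format string and labels are only an example):
//
//   if (benchmark::IsColorTerminal()) {
//     benchmark::ColorPrintf(std::cout, benchmark::COLOR_GREEN,
//                            "%-20s %13s\n", "Benchmark", "Time");
//   } else {
//     std::cout << "Benchmark                       Time\n";
//   }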

View File

@ -5,29 +5,30 @@
#include <iostream>
#include <string>
namespace benchmark
{
enum LogColor
{
COLOR_DEFAULT,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE
};
std::string FormatString(const char *msg, va_list args);
std::string FormatString(const char *msg, ...);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...);
// Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise.
bool IsColorTerminal();
} // end namespace benchmark
#endif // BENCHMARK_COLORPRINT_H_

View File

@ -21,112 +21,121 @@
#include <iostream>
#include <limits>
namespace benchmark {
namespace {
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
// Parses the environment variable as a decimal integer.
char* end = nullptr;
const long long_value = strtol(str, &end, 10); // NOLINT
// Has strtol() consumed all characters in the string?
if (*end != '\0') {
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
// Is the parsed value in the range of an Int32?
const int32_t result = static_cast<int32_t>(long_value);
if (long_value == std::numeric_limits<long>::max() ||
long_value == std::numeric_limits<long>::min() ||
// The parsed value overflows as a long. (strtol() returns
// LONG_MAX or LONG_MIN when the input overflows.)
result != long_value
// The parsed value overflows as an Int32.
) {
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", "
<< "which overflows.\n";
return false;
}
*value = result;
return true;
}
// Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string& src_text, const char* str, double* value) {
// Parses the environment variable as a decimal integer.
char* end = nullptr;
const double double_value = strtod(str, &end); // NOLINT
// Has strtol() consumed all characters in the string?
if (*end != '\0') {
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a double, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
*value = double_value;
return true;
}
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char* flag) {
const std::string flag_str(flag);
std::string env_var;
for (size_t i = 0; i != flag_str.length(); ++i)
env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
return "BENCHMARK_" + env_var;
return "BENCHMARK_" + env_var;
}
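// Illustrative mapping performed by FlagToEnvVar() for the *FromEnv() helpers
// below (the flag name is a placeholder, not a statement about which flags
// exist):
//
//   FlagToEnvVar("color");       // -> "BENCHMARK_COLOR"
//   BoolFromEnv("color", false); // consults getenv("BENCHMARK_COLOR")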
} // namespace
bool BoolFromEnv(const char* flag, bool default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
}
int32_t Int32FromEnv(const char* flag, int32_t default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
int32_t value = default_val;
if (value_str == nullptr ||
!ParseInt32(std::string("Environment variable ") + env_var, value_str,
&value)) {
return default_val;
}
return value;
}
double DoubleFromEnv(const char* flag, double default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value_str = getenv(env_var.c_str());
double value = default_val;
if (value_str == nullptr ||
!ParseDouble(std::string("Environment variable ") + env_var, value_str,
&value)) {
return default_val;
}
return value;
}
const char* StringFromEnv(const char* flag, const char* default_val) {
const std::string env_var = FlagToEnvVar(flag);
const char* const value = getenv(env_var.c_str());
return value == nullptr ? default_val : value;
}
// Parses a string as a command line flag. The string should have
@ -134,95 +143,109 @@ const char* StringFromEnv(const char* flag, const char* default_val) {
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char* ParseFlagValue(const char* str, const char* flag,
bool def_optional) {
// str and flag must not be nullptr.
if (str == nullptr || flag == nullptr) return nullptr;
// The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
// The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0)
return nullptr;
// Skips the flag name.
const char* flag_end = str + flag_len;
// When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0')) return flag_end;
// When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0'))
return flag_end;
// If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after
// the flag name.
if (flag_end[0] != '=') return nullptr;
// If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after
// the flag name.
if (flag_end[0] != '=')
return nullptr;
// Returns the string after "=".
return flag_end + 1;
// Returns the string after "=".
return flag_end + 1;
}
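A short sketch of what ParseFlagValue returns for typical inputs (flag names here are purely illustrative):

    const char *v1 = ParseFlagValue("--benchmark_filter=BM_Foo", "benchmark_filter", false);
    // v1 points at "BM_Foo"
    const char *v2 = ParseFlagValue("--benchmark_list_tests", "benchmark_list_tests", true);
    // v2 points at the empty string: def_optional allowed the "=value" part to be omitted
    const char *v3 = ParseFlagValue("--verbose=1", "benchmark_filter", false);
    // v3 == nullptr: the string does not start with "--benchmark_filter"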
bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, true);
bool ParseBoolFlag(const char *str, const char *flag, bool *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, true);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
// Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str);
return true;
}
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str,
value);
}
bool ParseDoubleFlag(const char* str, const char* flag, double* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
// Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str,
value);
}
bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr) return false;
*value = value_str;
return true;
}
bool IsFlag(const char* str, const char* flag) {
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string& value) {
if (value.size() == 1) {
char v = value[0];
return isalnum(v) &&
!(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
} else if (!value.empty()) {
std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" ||
value_lower == "off");
} else
// Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str);
return true;
}
} // end namespace benchmark
bool ParseInt32Flag(const char *str, const char *flag, int32_t *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
// Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str, value);
}
bool ParseDoubleFlag(const char *str, const char *flag, double *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
// Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str, value);
}
bool ParseStringFlag(const char *str, const char *flag, std::string *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
*value = value_str;
return true;
}
bool IsFlag(const char *str, const char *flag)
{
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string &value)
{
if (value.size() == 1)
{
char v = value[0];
return isalnum(v) && !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
}
else if (!value.empty())
{
std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" || value_lower == "off");
}
else
return true;
}
} // end namespace benchmark


@ -14,48 +14,41 @@
#define DECLARE_string(name) extern std::string FLAG(name)
// Macros for defining flags.
#define DEFINE_bool(name, default_val) \
bool FLAG(name) = \
benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) \
int32_t FLAG(name) = \
benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) \
double FLAG(name) = \
benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) \
std::string FLAG(name) = \
benchmark::StringFromEnv(#name, default_val)
#define DEFINE_bool(name, default_val) bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
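A minimal sketch of how these macros are meant to be used (flag names are illustrative; FLAG() is the naming macro already used above):

    // In a .cc file, define the flag globals and seed them from the environment:
    DEFINE_bool(benchmark_list_tests, false);
    DEFINE_int32(benchmark_repetitions, 1);
    // Elsewhere, the generated globals are read through the same FLAG() macro:
    if (FLAG(benchmark_list_tests)) { /* ... */ }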
namespace benchmark {
namespace benchmark
{
// Parses a bool from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns IsTruthyFlagValue() value; if not,
// returns the given default value.
bool BoolFromEnv(const char* flag, bool default_val);
bool BoolFromEnv(const char *flag, bool default_val);
// Parses an Int32 from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns ParseInt32() value; if not, returns
// the given default value.
int32_t Int32FromEnv(const char* flag, int32_t default_val);
int32_t Int32FromEnv(const char *flag, int32_t default_val);
// Parses a Double from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns ParseDouble(); if not, returns
// the given default value.
double DoubleFromEnv(const char* flag, double default_val);
double DoubleFromEnv(const char *flag, double default_val);
// Parses a string from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns its value; if not, returns
// the given default value.
const char* StringFromEnv(const char* flag, const char* default_val);
const char *StringFromEnv(const char *flag, const char *default_val);
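A hypothetical usage sketch (the flag-to-environment-variable mapping is done by FlagToEnvVar, which is not part of this hunk; flag names are illustrative):

    bool list_tests = benchmark::BoolFromEnv("benchmark_list_tests", false);
    int32_t repetitions = benchmark::Int32FromEnv("benchmark_repetitions", 1);
    const char *filter = benchmark::StringFromEnv("benchmark_filter", ".");
    // Each helper falls back to the given default when the corresponding environment
    // variable is unset (or, for the numeric variants, cannot be parsed).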
// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
@ -66,38 +59,38 @@ const char* StringFromEnv(const char* flag, const char* default_val);
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char* str, const char* flag, bool* value);
bool ParseBoolFlag(const char *str, const char *flag, bool *value);
// Parses a string for an Int32 flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
bool ParseInt32Flag(const char *str, const char *flag, int32_t *value);
// Parses a string for a Double flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char* str, const char* flag, double* value);
bool ParseDoubleFlag(const char *str, const char *flag, double *value);
// Parses a string for a string flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseStringFlag(const char* str, const char* flag, std::string* value);
bool ParseStringFlag(const char *str, const char *flag, std::string *value);
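Putting the parsers together, a command-line scanning loop in the style used by the library might look like this (a simplified sketch, not the library's actual argument-parsing code):

    for (int i = 1; i < argc; ++i)
    {
        if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAG(benchmark_list_tests)) ||
            ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAG(benchmark_repetitions)) ||
            ParseStringFlag(argv[i], "benchmark_filter", &FLAG(benchmark_filter)))
        {
            // argv[i] was recognized and the corresponding *value has been updated.
        }
    }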
// Returns true if the string matches the flag.
bool IsFlag(const char* str, const char* flag);
bool IsFlag(const char *str, const char *flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. Also returns false if the value matches
// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
// returns true if value is the empty string.
bool IsTruthyFlagValue(const std::string& value);
bool IsTruthyFlagValue(const std::string &value);
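Worked examples matching the implementation: IsTruthyFlagValue("1"), IsTruthyFlagValue("yes"), IsTruthyFlagValue("on") and IsTruthyFlagValue("") all return true, while IsTruthyFlagValue("0"), IsTruthyFlagValue("f"), IsTruthyFlagValue("false") and IsTruthyFlagValue("OFF") return false.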
} // end namespace benchmark
} // end namespace benchmark
#endif // BENCHMARK_COMMANDLINEFLAGS_H_
#endif // BENCHMARK_COMMANDLINEFLAGS_H_


@ -17,56 +17,58 @@
#include "benchmark/benchmark.h"
#include <algorithm>
#include <cmath>
#include "check.h"
#include "complexity.h"
#include <algorithm>
#include <cmath>
namespace benchmark {
namespace benchmark
{
// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
static const double kLog2E = 1.44269504088896340736;
switch (complexity) {
BigOFunc *FittingCurve(BigO complexity)
{
static const double kLog2E = 1.44269504088896340736;
switch (complexity)
{
case oN:
return [](IterationCount n) -> double { return static_cast<double>(n); };
return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared:
return [](IterationCount n) -> double { return std::pow(n, 2); };
return [](IterationCount n) -> double { return std::pow(n, 2); };
case oNCubed:
return [](IterationCount n) -> double { return std::pow(n, 3); };
return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return
[](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
};
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); };
case o1:
default:
return [](IterationCount) { return 1.0; };
}
return [](IterationCount) { return 1.0; };
}
}
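For example, FittingCurve(oNLogN) returns a lambda computing kLog2E * n * log(n), which equals n * log2(n); evaluated at n = 8 it yields 8 * 3 = 24. FittingCurve(o1) always yields 1.0 regardless of n.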
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity) {
switch (complexity) {
std::string GetBigOString(BigO complexity)
{
switch (complexity)
{
case oN:
return "N";
return "N";
case oNSquared:
return "N^2";
return "N^2";
case oNCubed:
return "N^3";
return "N^3";
case oLogN:
return "lgN";
return "lgN";
case oNLogN:
return "NlgN";
return "NlgN";
case o1:
return "(1)";
return "(1)";
default:
return "f(N)";
}
return "f(N)";
}
}
// Find the coefficient for the high-order term in the running time, by
@ -79,41 +81,42 @@ std::string GetBigOString(BigO complexity) {
// For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time,
BigOFunc* fitting_curve) {
double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, BigOFunc *fitting_curve)
{
double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i) {
double gn_i = fitting_curve(n[i]);
sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i;
sigma_time += time[i];
sigma_time_gn += time[i] * gn_i;
}
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i)
{
double gn_i = fitting_curve(n[i]);
sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i;
sigma_time += time[i];
sigma_time_gn += time[i] * gn_i;
}
LeastSq result;
result.complexity = oLambda;
LeastSq result;
result.complexity = oLambda;
// Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared;
// Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared;
// Calculate RMS
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i) {
double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2);
}
// Calculate RMS
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i)
{
double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2);
}
// Normalize the RMS by the mean of the observed values
double mean = sigma_time / n.size();
result.rms = sqrt(rms / n.size()) / mean;
// Normalize the RMS by the mean of the observed values
double mean = sigma_time / n.size();
result.rms = sqrt(rms / n.size()) / mean;
return result;
return result;
}
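In equation form, with g the fitting curve and t_i the per-iteration times, the loop above computes the single-parameter least-squares estimate and its mean-normalized RMS error:

    \hat{c} = \frac{\sum_i t_i \, g(n_i)}{\sum_i g(n_i)^2}, \qquad
    \text{rms} = \frac{\sqrt{\frac{1}{N} \sum_i \bigl(t_i - \hat{c}\, g(n_i)\bigr)^2}}{\frac{1}{N} \sum_i t_i}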
// Find the coefficient for the high-order term in the running time, by
@ -123,116 +126,126 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
// - complexity : If different from oAuto, the fitting curve will stick to
// this one. If it is oAuto, the best fitting curve will be
// calculated.
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
const std::vector<double>& time, const BigO complexity) {
CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2);  // Do not compute the fitting curve if fewer than two
// benchmark runs are given
CHECK_NE(complexity, oNone);
LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, const BigO complexity)
{
CHECK_EQ(n.size(), time.size());
CHECK_GE(n.size(), 2); // Do not compute the fitting curve if fewer than two
// benchmark runs are given
CHECK_NE(complexity, oNone);
LeastSq best_fit;
LeastSq best_fit;
if (complexity == oAuto) {
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
if (complexity == oAuto)
{
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve
best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1;
// Take o1 as default best fitting curve
best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) {
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms) {
best_fit = current_fit;
best_fit.complexity = fit;
}
// Compute all possible fitting curves and stick to the best one
for (const auto &fit : fit_curves)
{
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms)
{
best_fit = current_fit;
best_fit.complexity = fit;
}
}
}
else
{
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
} else {
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
return best_fit;
return best_fit;
}
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
if (reports.size() < 2) return results;
if (reports.size() < 2)
return results;
// Accumulators.
std::vector<int64_t> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Accumulators.
std::vector<int64_t> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run& run : reports) {
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
// Populate the accumulators.
for (const Run &run : reports)
{
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
LeastSq result_cpu;
LeastSq result_real;
LeastSq result_cpu;
LeastSq result_real;
if (reports[0].complexity == oLambda) {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
} else {
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
if (reports[0].complexity == oLambda)
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
}
else
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
// Drop the 'args' when reporting complexity.
auto run_name = reports[0].run_name;
run_name.args.clear();
// Drop the 'args' when reporting complexity.
auto run_name = reports[0].run_name;
run_name.args.clear();
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.run_name = run_name;
big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
big_o.repetitions = reports[0].repetitions;
big_o.repetition_index = Run::no_repetition_index;
big_o.threads = reports[0].threads;
big_o.aggregate_name = "BigO";
big_o.report_label = reports[0].report_label;
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity;
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.run_name = run_name;
big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
big_o.repetitions = reports[0].repetitions;
big_o.repetition_index = Run::no_repetition_index;
big_o.threads = reports[0].threads;
big_o.aggregate_name = "BigO";
big_o.report_label = reports[0].report_label;
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity;
// All the time results are reported after being multiplied by the
// time unit multiplier. But since RMS is a relative quantity it
// should not be multiplied at all. So, here, we _divide_ it by the
// multiplier so that when it is multiplied later the result is the
// correct one.
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// All the time results are reported after being multiplied by the
// time unit multiplier. But since RMS is a relative quantity it
// should not be multiplied at all. So, here, we _divide_ it by the
// multiplier so that when it is multiplied later the result is the
// correct one.
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// Only add label to mean/stddev if it is same for all runs
Run rms;
rms.run_name = run_name;
rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.repetition_index = Run::no_repetition_index;
rms.repetitions = reports[0].repetitions;
rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = result_cpu.complexity;
// don't forget to keep the time unit, or we won't be able to
// recover the correct value.
rms.time_unit = reports[0].time_unit;
// Only add label to mean/stddev if it is same for all runs
Run rms;
rms.run_name = run_name;
rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.repetition_index = Run::no_repetition_index;
rms.repetitions = reports[0].repetitions;
rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = result_cpu.complexity;
// don't forget to keep the time unit, or we won't be able to
// recover the correct value.
rms.time_unit = reports[0].time_unit;
results.push_back(big_o);
results.push_back(rms);
return results;
results.push_back(big_o);
results.push_back(rms);
return results;
}
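A quick arithmetic check of the RMS handling above: if result_cpu.rms is 0.05 (5 %) and the runs' time unit has multiplier m, the value stored in rms.cpu_accumulated_time is 0.05 / m, so the reporter's later multiplication by m prints 0.05 again instead of an inflated 0.05 * m.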
} // end namespace benchmark
} // end namespace benchmark


@ -23,12 +23,12 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace benchmark
{
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(
const std::vector<BenchmarkReporter::Run>& reports);
std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports);
// This data structure will contain the result returned by MinimalLeastSq
// - coef : Estimated coefficient for the high-order term as
@ -39,17 +39,20 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// the same value. In case BigO::oAuto has been selected, this
// parameter will return the best fitting curve detected.
struct LeastSq {
LeastSq() : coef(0.0), rms(0.0), complexity(oNone) {}
struct LeastSq
{
LeastSq() : coef(0.0), rms(0.0), complexity(oNone)
{
}
double coef;
double rms;
BigO complexity;
double coef;
double rms;
BigO complexity;
};
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity);
} // end namespace benchmark
} // end namespace benchmark
#endif // COMPLEXITY_H_
#endif // COMPLEXITY_H_


@ -31,147 +31,164 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace benchmark
{
bool ConsoleReporter::ReportContext(const Context& context) {
name_field_width_ = context.name_field_width;
printed_header_ = false;
prev_counters_.clear();
bool ConsoleReporter::ReportContext(const Context &context)
{
name_field_width_ = context.name_field_width;
printed_header_ = false;
prev_counters_.clear();
PrintBasicContext(&GetErrorStream(), context);
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) {
GetErrorStream()
<< "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
}
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream())
{
GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
}
#endif
return true;
return true;
}
void ConsoleReporter::PrintHeader(const Run& run) {
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
"Benchmark", "Time", "CPU", "Iterations");
if(!run.counters.empty()) {
if(output_options_ & OO_Tabular) {
for(auto const& c : run.counters) {
str += FormatString(" %10s", c.first.c_str());
}
} else {
str += " UserCounters...";
void ConsoleReporter::PrintHeader(const Run &run)
{
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), "Benchmark", "Time",
"CPU", "Iterations");
if (!run.counters.empty())
{
if (output_options_ & OO_Tabular)
{
for (auto const &c : run.counters)
{
str += FormatString(" %10s", c.first.c_str());
}
}
else
{
str += " UserCounters...";
}
}
}
std::string line = std::string(str.length(), '-');
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
std::string line = std::string(str.length(), '-');
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run>& reports) {
for (const auto& run : reports) {
// print the header:
// --- if none was printed yet
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) &&
(!internal::SameNames(run.counters, prev_counters_));
if (print_header) {
printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
void ConsoleReporter::ReportRuns(const std::vector<Run> &reports)
{
for (const auto &run : reports)
{
// print the header:
// --- if none was printed yet
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) && (!internal::SameNames(run.counters, prev_counters_));
if (print_header)
{
printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
}
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
}
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
}
}
static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt,
...) {
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
static void IgnoreColorPrint(std::ostream &out, LogColor, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
}
static std::string FormatTime(double time) {
// Align decimal places...
if (time < 1.0) {
return FormatString("%10.3f", time);
}
if (time < 10.0) {
return FormatString("%10.2f", time);
}
if (time < 100.0) {
return FormatString("%10.1f", time);
}
return FormatString("%10.0f", time);
static std::string FormatTime(double time)
{
// Align decimal places...
if (time < 1.0)
{
return FormatString("%10.3f", time);
}
if (time < 10.0)
{
return FormatString("%10.2f", time);
}
if (time < 100.0)
{
return FormatString("%10.1f", time);
}
return FormatString("%10.0f", time);
}
void ConsoleReporter::PrintRunData(const Run& result) {
typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
auto& Out = GetOutputStream();
PrinterFn* printer = (output_options_ & OO_Color) ?
(PrinterFn*)ColorPrintf : IgnoreColorPrint;
auto name_color =
(result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name().c_str());
void ConsoleReporter::PrintRunData(const Run &result)
{
typedef void(PrinterFn)(std::ostream &, LogColor, const char *, ...);
auto &Out = GetOutputStream();
PrinterFn *printer = (output_options_ & OO_Color) ? (PrinterFn *)ColorPrintf : IgnoreColorPrint;
auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_, result.benchmark_name().c_str());
if (result.error_occurred)
{
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o)
{
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), cpu_time, big_o.c_str());
}
else if (result.report_rms)
{
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", cpu_time * 100, "%");
}
else
{
const char *timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, cpu_time_str.c_str(),
timeLabel);
}
if (!result.report_big_o && !result.report_rms)
{
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto &c : result.counters)
{
const std::size_t cNameLen = std::max(std::string::size_type(10), c.first.length());
auto const &s = HumanReadableNumber(c.second.value, c.second.oneK);
const char *unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular)
{
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), unit);
}
else
{
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty())
{
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
if (result.error_occurred) {
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o) {
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
cpu_time, big_o.c_str());
} else if (result.report_rms) {
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
cpu_time * 100, "%");
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
cpu_time_str.c_str(), timeLabel);
}
if (!result.report_big_o && !result.report_rms) {
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto& c : result.counters) {
const std::size_t cNameLen = std::max(std::string::size_type(10),
c.first.length());
auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
const char* unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular) {
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
unit);
} else {
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty()) {
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
printer(Out, COLOR_DEFAULT, "\n");
}
} // end namespace benchmark
} // end namespace benchmark


@ -14,67 +14,85 @@
#include "counter.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
double Finish(Counter const& c, IterationCount iterations, double cpu_time,
double num_threads) {
double v = c.value;
if (c.flags & Counter::kIsRate) {
v /= cpu_time;
}
if (c.flags & Counter::kAvgThreads) {
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant) {
v *= iterations;
}
if (c.flags & Counter::kAvgIterations) {
v /= iterations;
}
if (c.flags & Counter::kInvert) { // Invert is *always* last.
v = 1.0 / v;
}
return v;
}
void Finish(UserCounters* l, IterationCount iterations, double cpu_time,
double num_threads) {
for (auto& c : *l) {
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
void Increment(UserCounters* l, UserCounters const& r) {
// add counters present in both or just in *l
for (auto& c : *l) {
auto it = r.find(c.first);
if (it != r.end()) {
c.second.value = c.second + it->second;
double Finish(Counter const &c, IterationCount iterations, double cpu_time, double num_threads)
{
double v = c.value;
if (c.flags & Counter::kIsRate)
{
v /= cpu_time;
}
}
// add counters present in r, but not in *l
for (auto const& tc : r) {
auto it = l->find(tc.first);
if (it == l->end()) {
(*l)[tc.first] = tc.second;
if (c.flags & Counter::kAvgThreads)
{
v /= num_threads;
}
}
if (c.flags & Counter::kIsIterationInvariant)
{
v *= iterations;
}
if (c.flags & Counter::kAvgIterations)
{
v /= iterations;
}
if (c.flags & Counter::kInvert)
{ // Invert is *always* last.
v = 1.0 / v;
}
return v;
}
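Worked example of the flag arithmetic above: a counter whose raw value is 6e6 (say, bytes processed) with the kIsRate flag and cpu_time = 2.0 finishes as 3e6, i.e. bytes per second; adding kAvgThreads would additionally divide by the thread count, and kInvert (always applied last) would turn the result into seconds per byte.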
bool SameNames(UserCounters const& l, UserCounters const& r) {
if (&l == &r) return true;
if (l.size() != r.size()) {
return false;
}
for (auto const& c : l) {
if (r.find(c.first) == r.end()) {
return false;
void Finish(UserCounters *l, IterationCount iterations, double cpu_time, double num_threads)
{
for (auto &c : *l)
{
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
return true;
}
} // end namespace internal
} // end namespace benchmark
void Increment(UserCounters *l, UserCounters const &r)
{
// add counters present in both or just in *l
for (auto &c : *l)
{
auto it = r.find(c.first);
if (it != r.end())
{
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
for (auto const &tc : r)
{
auto it = l->find(tc.first);
if (it == l->end())
{
(*l)[tc.first] = tc.second;
}
}
}
bool SameNames(UserCounters const &l, UserCounters const &r)
{
if (&l == &r)
return true;
if (l.size() != r.size())
{
return false;
}
for (auto const &c : l)
{
if (r.find(c.first) == r.end())
{
return false;
}
}
return true;
}
} // end namespace internal
} // end namespace benchmark


@ -17,16 +17,17 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace benchmark
{
// these counter-related functions are hidden to reduce API surface.
namespace internal {
void Finish(UserCounters* l, IterationCount iterations, double time,
double num_threads);
void Increment(UserCounters* l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
} // end namespace internal
namespace internal
{
void Finish(UserCounters *l, IterationCount iterations, double time, double num_threads);
void Increment(UserCounters *l, UserCounters const &r);
bool SameNames(UserCounters const &l, UserCounters const &r);
} // end namespace internal
} // end namespace benchmark
} // end namespace benchmark
#endif // BENCHMARK_COUNTER_H_
#endif // BENCHMARK_COUNTER_H_


@ -28,127 +28,159 @@
// File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark {
namespace benchmark
{
namespace {
std::vector<std::string> elements = {
"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"};
} // namespace
namespace
{
std::vector<std::string> elements = {"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"};
} // namespace
std::string CsvEscape(const std::string & s) {
std::string tmp;
tmp.reserve(s.size() + 2);
for (char c : s) {
switch (c) {
case '"' : tmp += "\"\""; break;
default : tmp += c; break;
std::string CsvEscape(const std::string &s)
{
std::string tmp;
tmp.reserve(s.size() + 2);
for (char c : s)
{
switch (c)
{
case '"':
tmp += "\"\"";
break;
default:
tmp += c;
break;
}
}
}
return '"' + tmp + '"';
return '"' + tmp + '"';
}
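For example, CsvEscape("say \"hi\"") yields the CSV field "say ""hi""" (including the surrounding quotes): embedded double quotes are doubled and the whole value is wrapped in quotes.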
bool CSVReporter::ReportContext(const Context& context) {
PrintBasicContext(&GetErrorStream(), context);
return true;
bool CSVReporter::ReportContext(const Context &context)
{
PrintBasicContext(&GetErrorStream(), context);
return true;
}
void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
std::ostream& Out = GetOutputStream();
void CSVReporter::ReportRuns(const std::vector<Run> &reports)
{
std::ostream &Out = GetOutputStream();
if (!printed_header_) {
// save the names of all the user counters
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
user_counter_names_.insert(cnt.first);
}
if (!printed_header_)
{
// save the names of all the user counters
for (const auto &run : reports)
{
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
user_counter_names_.insert(cnt.first);
}
}
// print the header
for (auto B = elements.begin(); B != elements.end();)
{
Out << *B++;
if (B != elements.end())
Out << ",";
}
for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();)
{
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
printed_header_ = true;
}
else
{
// check that all the current counters are saved in the name set
for (const auto &run : reports)
{
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first << "\" was not in a run after being added to the header";
}
}
}
// print the header
for (auto B = elements.begin(); B != elements.end();) {
Out << *B++;
if (B != elements.end()) Out << ",";
// print results for each run
for (const auto &run : reports)
{
PrintRunData(run);
}
for (auto B = user_counter_names_.begin();
B != user_counter_names_.end();) {
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
printed_header_ = true;
} else {
// check that all the current counters are saved in the name set
for (const auto& run : reports) {
for (const auto& cnt : run.counters) {
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first
<< "\" was not in a run after being added to the header";
}
}
}
// print results for each run
for (const auto& run : reports) {
PrintRunData(run);
}
}
void CSVReporter::PrintRunData(const Run& run) {
std::ostream& Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred) {
Out << std::string(elements.size() - 3, ',');
Out << "true,";
Out << CsvEscape(run.error_message) << "\n";
return;
}
// Do not print the iteration count on bigO and RMS reports
if (!run.report_big_o && !run.report_rms) {
Out << run.iterations;
}
Out << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on bigO and RMS report
if (run.report_big_o) {
Out << GetBigOString(run.complexity);
} else if (!run.report_rms) {
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
if (run.counters.find("bytes_per_second") != run.counters.end()) {
Out << run.counters.at("bytes_per_second");
}
Out << ",";
if (run.counters.find("items_per_second") != run.counters.end()) {
Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty()) {
Out << CsvEscape(run.report_label);
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
for (const auto& ucn : user_counter_names_) {
auto it = run.counters.find(ucn);
if (it == run.counters.end()) {
Out << ",";
} else {
Out << "," << it->second;
void CSVReporter::PrintRunData(const Run &run)
{
std::ostream &Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred)
{
Out << std::string(elements.size() - 3, ',');
Out << "true,";
Out << CsvEscape(run.error_message) << "\n";
return;
}
}
Out << '\n';
// Do not print the iteration count on bigO and RMS reports
if (!run.report_big_o && !run.report_rms)
{
Out << run.iterations;
}
Out << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
// Do not print timeLabel on bigO and RMS report
if (run.report_big_o)
{
Out << GetBigOString(run.complexity);
}
else if (!run.report_rms)
{
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
if (run.counters.find("bytes_per_second") != run.counters.end())
{
Out << run.counters.at("bytes_per_second");
}
Out << ",";
if (run.counters.find("items_per_second") != run.counters.end())
{
Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty())
{
Out << CsvEscape(run.report_label);
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
for (const auto &ucn : user_counter_names_)
{
auto it = run.counters.find(ucn);
if (it == run.counters.end())
{
Out << ",";
}
else
{
Out << "," << it->second;
}
}
Out << '\n';
}
} // end namespace benchmark
} // end namespace benchmark


@ -50,148 +50,151 @@ extern "C" uint64_t __rdtsc();
#include <emscripten.h>
#endif
namespace benchmark {
namespace benchmark
{
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
namespace cycleclock
{
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
inline BENCHMARK_ALWAYS_INLINE int64_t Now()
{
#if defined(BENCHMARK_OS_MACOSX)
// this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that
// have passed since startup. See sysinfo.cc where
// InitializeSystemInfo() sets the supposed cpu clock frequency of
// macs to the number of mach time units per second, not actual
// CPU clock frequency (which can change in the face of CPU
// frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it
// reset to zero.
return mach_absolute_time();
// this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that
// have passed since startup. See sysinfo.cc where
// InitializeSystemInfo() sets the supposed cpu clock frequency of
// macs to the number of mach time units per second, not actual
// CPU clock frequency (which can change in the face of CPU
// frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it
// reset to zero.
return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it.
return static_cast<int64_t>(emscripten_get_now() * 1e+6);
// this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it.
return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
#elif defined(__x86_64__) || defined(__amd64__)
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
// This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
int64_t tb;
asm volatile("mfspr %0, 268" : "=r"(tb));
return tb;
int64_t tb;
asm volatile("mfspr %0, 268" : "=r"(tb));
return tb;
#else
uint32_t tbl, tbu0, tbu1;
asm volatile(
"mftbu %0\n"
"mftbl %1\n"
"mftbu %2"
: "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
tbl &= -static_cast<int32_t>(tbu0 == tbu1);
// high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
return (static_cast<uint64_t>(tbu1) << 32) | tbl;
uint32_t tbl, tbu0, tbu1;
asm volatile("mftbu %0\n"
"mftbl %1\n"
"mftbu %2"
: "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
tbl &= -static_cast<int32_t>(tbu0 == tbu1);
// high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
asm("mov %%g1, %0" : "=r"(tick));
return tick;
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
asm("mov %%g1, %0" : "=r"(tick));
return tick;
#elif defined(__ia64__)
int64_t itc;
asm("mov %0 = ar.itc" : "=r"(itc));
return itc;
int64_t itc;
asm("mov %0 = ar.itc" : "=r"(itc));
return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
// Older MSVC compilers (like 7.x) don't seem to support the
// __rdtsc intrinsic properly, so I prefer to use _asm instead
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
// Older MSVC compilers (like 7.x) don't seem to support the
// __rdtsc intrinsic properly, so I prefer to use _asm instead
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
#elif defined(COMPILER_MSVC)
return __rdtsc();
return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
// Native Client validator on x86/x86-64 allows RDTSC instructions,
// and this case is handled above. Native Client validator on ARM
// rejects MRC instructions (used in the ARM-specific sequence below),
// so we handle it here. Portable Native Client compiles to
// architecture-agnostic bytecode, which doesn't provide any
// cycle counter access mnemonics.
// Native Client validator on x86/x86-64 allows RDTSC instructions,
// and this case is handled above. Native Client validator on ARM
// rejects MRC instructions (used in the ARM-specific sequence below),
// so we handle it here. Portable Native Client compiles to
// architecture-agnostic bytecode, which doesn't provide any
// cycle counter access mnemonics.
// Native Client does not provide any API to access cycle counter.
// Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
// because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
// Native Client does not provide any API to access cycle counter.
// Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
// because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
// System timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50MHz. It can be
// read at CNTFRQ special register. We assume the OS has set up
// the virtual timer properly.
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
// System timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50MHz. It can be
// read at CNTFRQ special register. We assume the OS has set up
// the virtual timer properly.
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
#elif defined(__ARM_ARCH)
// V6 is the earliest arch that has a standard cyclecount
// Native Client validator doesn't allow MRC instructions.
// V6 is the earliest arch that has a standard cyclecount
// Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) { // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) { // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1)
{ // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul)
{ // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
}
}
}
#endif
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__)
// mips apparently only allows rdtsc for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
asm("stck %0" : "=Q"(tsc) : : "cc");
return tsc;
#elif defined(__riscv) // RISC-V
// Use RDCYCLE (and RDCYCLEH on riscv32)
// mips apparently only allows rdtsc for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
asm("stck %0" : "=Q"(tsc) : : "cc");
return tsc;
#elif defined(__riscv) // RISC-V
// Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching.
asm volatile(
"rdcycleh %0\n"
"rdcycle %1\n"
"rdcycleh %2\n"
"sub %0, %0, %2\n"
"seqz %0, %0\n"
"sub %0, zero, %0\n"
"and %1, %1, %0\n"
: "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching.
asm volatile("rdcycleh %0\n"
"rdcycle %1\n"
"rdcycleh %2\n"
"sub %0, %0, %2\n"
"seqz %0, %0\n"
"sub %0, zero, %0\n"
"and %1, %1, %0\n"
: "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
uint64_t cycles;
asm volatile("rdcycle %0" : "=r"(cycles));
return cycles;
uint64_t cycles;
asm volatile("rdcycle %0" : "=r"(cycles));
return cycles;
#endif
#else
// The soft failover to a generic implementation is automatic only for ARM.
@ -200,7 +203,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#error You need to define CycleTimer for your OS and CPU
#endif
}
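Whichever branch is selected above, the counter is used the same way; a minimal sketch (DoWork is a placeholder):

    int64_t start = benchmark::cycleclock::Now();
    DoWork();
    int64_t elapsed = benchmark::cycleclock::Now() - start;
    // elapsed is in CPU cycles, time-base ticks, or mach time units depending on
    // the platform branch chosen above; it is not directly seconds.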
} // end namespace cycleclock
} // end namespace benchmark
} // end namespace cycleclock
} // end namespace benchmark
#endif // BENCHMARK_CYCLECLOCK_H_
#endif // BENCHMARK_CYCLECLOCK_H_


@ -91,4 +91,4 @@
// clang-format on
#endif // BENCHMARK_INTERNAL_MACROS_H_
#endif // BENCHMARK_INTERNAL_MACROS_H_


@ -18,7 +18,7 @@
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iomanip> // for setprecision
#include <iomanip> // for setprecision
#include <iostream>
#include <limits>
#include <string>
@ -28,226 +28,267 @@
#include "string_util.h"
#include "timers.h"
namespace benchmark {
namespace benchmark
{
namespace {
namespace
{
std::string StrEscape(const std::string & s) {
std::string tmp;
tmp.reserve(s.size());
for (char c : s) {
switch (c) {
case '\b': tmp += "\\b"; break;
case '\f': tmp += "\\f"; break;
case '\n': tmp += "\\n"; break;
case '\r': tmp += "\\r"; break;
case '\t': tmp += "\\t"; break;
case '\\': tmp += "\\\\"; break;
case '"' : tmp += "\\\""; break;
default : tmp += c; break;
std::string StrEscape(const std::string &s)
{
std::string tmp;
tmp.reserve(s.size());
for (char c : s)
{
switch (c)
{
case '\b':
tmp += "\\b";
break;
case '\f':
tmp += "\\f";
break;
case '\n':
tmp += "\\n";
break;
case '\r':
tmp += "\\r";
break;
case '\t':
tmp += "\\t";
break;
case '\\':
tmp += "\\\\";
break;
case '"':
tmp += "\\\"";
break;
default:
tmp += c;
break;
}
}
}
return tmp;
return tmp;
}
std::string FormatKV(std::string const& key, std::string const& value) {
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
std::string FormatKV(std::string const &key, std::string const &value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, const char* value) {
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
std::string FormatKV(std::string const &key, const char *value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const& key, bool value) {
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
std::string FormatKV(std::string const &key, bool value)
{
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const& key, int64_t value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
std::string FormatKV(std::string const &key, int64_t value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, IterationCount value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
std::string FormatKV(std::string const &key, IterationCount value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const& key, double value) {
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
std::string FormatKV(std::string const &key, double value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
if (std::isnan(value))
ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity";
else {
const auto max_digits10 =
std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10)
<< value;
}
return ss.str();
if (std::isnan(value))
ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity";
else
{
const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
}
return ss.str();
}
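For instance, FormatKV("real_time", 0.25) yields "real_time": 2.5000000000000000e-01 (max_digits10 for double is 17, so 16 fractional digits are printed in scientific notation), while NaN and infinite values are emitted as NaN and Infinity with an optional leading minus sign.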
int64_t RoundDouble(double v) { return std::lround(v); }
int64_t RoundDouble(double v)
{
return std::lround(v);
}
} // end namespace
} // end namespace
bool JSONReporter::ReportContext(const Context& context) {
std::ostream& out = GetOutputStream();
bool JSONReporter::ReportContext(const Context &context)
{
std::ostream &out = GetOutputStream();
out << "{\n";
std::string inner_indent(2, ' ');
out << "{\n";
std::string inner_indent(2, ' ');
// Open context block and print context information.
out << inner_indent << "\"context\": {\n";
std::string indent(4, ' ');
// Open context block and print context information.
out << inner_indent << "\"context\": {\n";
std::string indent(4, ' ');
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name) {
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
if (Context::executable_name)
{
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
CPUInfo const& info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
<< ",\n";
out << indent
<< FormatKV("mhz_per_cpu",
RoundDouble(info.cycles_per_second / 1000000.0))
<< ",\n";
out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
<< ",\n";
CPUInfo const &info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus)) << ",\n";
out << indent << FormatKV("mhz_per_cpu", RoundDouble(info.cycles_per_second / 1000000.0)) << ",\n";
out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) << ",\n";
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i) {
auto& CI = info.caches[i];
out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
<< ",\n";
out << cache_indent
<< FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent
<< FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
<< "\n";
out << indent << "}";
if (i != info.caches.size() - 1) out << ",";
out << "\n";
}
indent = std::string(4, ' ');
out << indent << "],\n";
out << indent << "\"load_avg\": [";
for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
out << *it++;
if (it != info.load_avg.end()) out << ",";
}
out << "],\n";
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i)
{
auto &CI = info.caches[i];
out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level)) << ",\n";
out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing)) << "\n";
out << indent << "}";
if (i != info.caches.size() - 1)
out << ",";
out << "\n";
}
indent = std::string(4, ' ');
out << indent << "],\n";
out << indent << "\"load_avg\": [";
for (auto it = info.load_avg.begin(); it != info.load_avg.end();)
{
out << *it++;
if (it != info.load_avg.end())
out << ",";
}
out << "],\n";
#if defined(NDEBUG)
const char build_type[] = "release";
const char build_type[] = "release";
#else
const char build_type[] = "debug";
const char build_type[] = "debug";
#endif
out << indent << FormatKV("library_build_type", build_type) << "\n";
// Close context block and open the list of benchmarks.
out << inner_indent << "},\n";
out << inner_indent << "\"benchmarks\": [\n";
return true;
out << indent << FormatKV("library_build_type", build_type) << "\n";
// Close context block and open the list of benchmarks.
out << inner_indent << "},\n";
out << inner_indent << "\"benchmarks\": [\n";
return true;
}
void JSONReporter::ReportRuns(std::vector<Run> const& reports) {
if (reports.empty()) {
return;
}
std::string indent(4, ' ');
std::ostream& out = GetOutputStream();
if (!first_report_) {
out << ",\n";
}
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) {
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end()) {
out << ",\n";
void JSONReporter::ReportRuns(std::vector<Run> const &reports)
{
if (reports.empty())
{
return;
}
}
}
void JSONReporter::Finalize() {
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
std::string indent(6, ' ');
std::ostream& out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char* {
switch (run.run_type) {
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
return "aggregate";
std::string indent(4, ' ');
std::ostream &out = GetOutputStream();
if (!first_report_)
{
out << ",\n";
}
BENCHMARK_UNREACHABLE();
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("repetition_index", run.repetition_index)
<< ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred) {
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms) {
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n"
<< indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_big_o) {
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
<< ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
<< ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
} else if (run.report_rms) {
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
first_report_ = false;
for (auto& c : run.counters) {
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.has_memory_result) {
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
if (!run.report_label.empty()) {
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
for (auto it = reports.begin(); it != reports.end(); ++it)
{
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end())
{
out << ",\n";
}
}
}
} // end namespace benchmark
void JSONReporter::Finalize()
{
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const &run)
{
std::string indent(6, ' ');
std::ostream &out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char * {
switch (run.run_type)
{
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
return "aggregate";
}
BENCHMARK_UNREACHABLE();
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("repetition_index", run.repetition_index) << ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred)
{
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms)
{
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_big_o)
{
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_rms)
{
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto &c : run.counters)
{
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.has_memory_result)
{
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
if (!run.report_label.empty())
{
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
}
} // end namespace benchmark
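
Editor's note on the FormatKV(double) overload above: JSON has no literals for NaN or infinity, so the reporter emits the bare tokens NaN/Infinity (strictly speaking not valid JSON, but that is what the code does), while finite values go out in scientific notation with max_digits10 - 1 fractional digits so the value survives a text round trip. A minimal standalone sketch of the same rule, independent of the reporter (the function name is the editor's, not the library's):

#include <cmath>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>

// Sketch of the double-formatting rule used by JSONReporter::FormatKV above.
std::string FormatJSONDouble(double value) {
  std::stringstream ss;
  if (std::isnan(value))
    // Any comparison with NaN is false, so the "-" branch is never taken here;
    // this mirrors the original code.
    ss << (value < 0 ? "-" : "") << "NaN";
  else if (std::isinf(value))
    ss << (value < 0 ? "-" : "") << "Infinity";
  else {
    const int max_fractional_digits10 =
        std::numeric_limits<double>::max_digits10 - 1;
    ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
  }
  return ss.str();
}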

View File

@ -6,65 +6,77 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
typedef std::basic_ostream<char>&(EndLType)(std::basic_ostream<char>&);
typedef std::basic_ostream<char> &(EndLType)(std::basic_ostream<char> &);
class LogType {
friend LogType& GetNullLogInstance();
friend LogType& GetErrorLogInstance();
class LogType
{
friend LogType &GetNullLogInstance();
friend LogType &GetErrorLogInstance();
// FIXME: Add locking to output.
template <class Tp>
friend LogType& operator<<(LogType&, Tp const&);
friend LogType& operator<<(LogType&, EndLType*);
// FIXME: Add locking to output.
template <class Tp> friend LogType &operator<<(LogType &, Tp const &);
friend LogType &operator<<(LogType &, EndLType *);
private:
LogType(std::ostream* out) : out_(out) {}
std::ostream* out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
private:
LogType(std::ostream *out) : out_(out)
{
}
std::ostream *out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
};
template <class Tp>
LogType& operator<<(LogType& log, Tp const& value) {
if (log.out_) {
*log.out_ << value;
}
return log;
template <class Tp> LogType &operator<<(LogType &log, Tp const &value)
{
if (log.out_)
{
*log.out_ << value;
}
return log;
}
inline LogType& operator<<(LogType& log, EndLType* m) {
if (log.out_) {
*log.out_ << m;
}
return log;
inline LogType &operator<<(LogType &log, EndLType *m)
{
if (log.out_)
{
*log.out_ << m;
}
return log;
}
inline int& LogLevel() {
static int log_level = 0;
return log_level;
inline int &LogLevel()
{
static int log_level = 0;
return log_level;
}
inline LogType& GetNullLogInstance() {
static LogType log(nullptr);
return log;
inline LogType &GetNullLogInstance()
{
static LogType log(nullptr);
return log;
}
inline LogType& GetErrorLogInstance() {
static LogType log(&std::clog);
return log;
inline LogType &GetErrorLogInstance()
{
static LogType log(&std::clog);
return log;
}
inline LogType& GetLogInstanceForLevel(int level) {
if (level <= LogLevel()) {
return GetErrorLogInstance();
}
return GetNullLogInstance();
inline LogType &GetLogInstanceForLevel(int level)
{
if (level <= LogLevel())
{
return GetErrorLogInstance();
}
return GetNullLogInstance();
}
} // end namespace internal
} // end namespace benchmark
} // end namespace internal
} // end namespace benchmark
// clang-format off
#define VLOG(x) \

View File

@ -11,7 +11,7 @@
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
@ -22,49 +22,38 @@
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark {
namespace benchmark
{
typedef std::condition_variable Condition;
@ -72,84 +61,114 @@ typedef std::condition_variable Condition;
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex {
public:
Mutex() {}
void lock() ACQUIRE() { mut_.lock(); }
void unlock() RELEASE() { mut_.unlock(); }
std::mutex& native_handle() { return mut_; }
private:
std::mutex mut_;
};
class SCOPED_CAPABILITY MutexLock {
typedef std::unique_lock<std::mutex> MutexLockImp;
public:
MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
~MutexLock() RELEASE() {}
MutexLockImp& native_handle() { return ml_; }
private:
MutexLockImp ml_;
};
class Barrier {
public:
Barrier(int num_threads) : running_threads_(num_threads) {}
// Called by each thread
bool wait() EXCLUDES(lock_) {
bool last_thread = false;
class CAPABILITY("mutex") Mutex
{
public:
Mutex()
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread) phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_) {
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0) phase_condition_.notify_all();
}
private:
Mutex lock_;
Condition phase_condition_;
int running_threads_;
// State for barrier management
int phase_number_ = 0;
int entered_ = 0; // Number of threads that have entered this barrier
// Enter the barrier and wait until all other threads have also
  // entered the barrier. Returns true iff this is the last thread to
// enter the barrier.
bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_) {
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp) return false;
// else (running_threads_ == entered_) and we are the last thread.
void lock() ACQUIRE()
{
mut_.lock();
}
// Last thread has reached the barrier
phase_number_++;
entered_ = 0;
return true;
}
void unlock() RELEASE()
{
mut_.unlock();
}
std::mutex &native_handle()
{
return mut_;
}
private:
std::mutex mut_;
};
} // end namespace benchmark
class SCOPED_CAPABILITY MutexLock
{
typedef std::unique_lock<std::mutex> MutexLockImp;
#endif // BENCHMARK_MUTEX_H_
public:
MutexLock(Mutex &m) ACQUIRE(m) : ml_(m.native_handle())
{
}
~MutexLock() RELEASE()
{
}
MutexLockImp &native_handle()
{
return ml_;
}
private:
MutexLockImp ml_;
};
class Barrier
{
public:
Barrier(int num_threads) : running_threads_(num_threads)
{
}
// Called by each thread
bool wait() EXCLUDES(lock_)
{
bool last_thread = false;
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread)
phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_)
{
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0)
phase_condition_.notify_all();
}
private:
Mutex lock_;
Condition phase_condition_;
int running_threads_;
// State for barrier management
int phase_number_ = 0;
int entered_ = 0; // Number of threads that have entered this barrier
// Enter the barrier and wait until all other threads have also
    // entered the barrier. Returns true iff this is the last thread to
// enter the barrier.
bool createBarrier(MutexLock &ml) REQUIRES(lock_)
{
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_)
{
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp)
return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
phase_number_++;
entered_ = 0;
return true;
}
};
} // end namespace benchmark
#endif // BENCHMARK_MUTEX_H_
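
A minimal usage sketch for the Barrier class above (editor's illustration, not part of the diff; the include path is an assumption): each worker calls wait() once per phase, the last arrival advances the phase and wakes the others, and only that last thread sees wait() return true.

#include <functional>
#include <thread>
#include <vector>

#include "mutex.h"  // assumed path of the header shown above

void RunPhase(benchmark::Barrier& barrier) {
  // ... per-thread work for this phase would go here ...
  if (barrier.wait()) {
    // Exactly one thread (the last to arrive) sees true and can do
    // one-time bookkeeping for the phase.
  }
  // All threads proceed together past this point.
}

int main() {
  const int kThreads = 4;
  benchmark::Barrier barrier(kThreads);
  std::vector<std::thread> pool;
  for (int t = 0; t < kThreads; ++t)
    pool.emplace_back(RunPhase, std::ref(barrier));
  for (auto& th : pool) th.join();
  return 0;
}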

View File

@ -54,32 +54,36 @@
#include "check.h"
namespace benchmark {
namespace benchmark
{
// A wrapper around the POSIX regular expression API that provides automatic
// cleanup
class Regex {
public:
Regex() : init_(false) {}
class Regex
{
public:
Regex() : init_(false)
{
}
~Regex();
~Regex();
// Compile a regular expression matcher from spec. Returns true on success.
//
// On failure (and if error is not nullptr), error is populated with a human
// readable error message if an error occurs.
bool Init(const std::string& spec, std::string* error);
// Compile a regular expression matcher from spec. Returns true on success.
//
// On failure (and if error is not nullptr), error is populated with a human
// readable error message if an error occurs.
bool Init(const std::string &spec, std::string *error);
// Returns whether str matches the compiled regular expression.
bool Match(const std::string& str);
// Returns whether str matches the compiled regular expression.
bool Match(const std::string &str);
private:
bool init_;
private:
bool init_;
// Underlying regular expression object
#if defined(HAVE_STD_REGEX)
std::regex re_;
std::regex re_;
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_;
regex_t re_;
#else
#error No regular expression backend implementation available
#endif
@ -87,72 +91,87 @@ class Regex {
#if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string& spec, std::string* error) {
inline bool Regex::Init(const std::string &spec, std::string *error)
{
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning
((void)error); // suppress unused warning
#else
try {
try
{
#endif
re_ = std::regex(spec, std::regex_constants::extended);
init_ = true;
re_ = std::regex(spec, std::regex_constants::extended);
init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
}
catch (const std::regex_error& e) {
if (error) {
*error = e.what();
}
catch (const std::regex_error &e)
{
if (error)
{
*error = e.what();
}
}
#endif
return init_;
}
inline Regex::~Regex() {}
inline Regex::~Regex()
{
}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
return false;
}
return std::regex_search(str, re_);
inline bool Regex::Match(const std::string &str)
{
if (!init_)
{
return false;
}
return std::regex_search(str, re_);
}
#else
inline bool Regex::Init(const std::string& spec, std::string* error) {
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0) {
if (error) {
size_t needed = regerror(ec, &re_, nullptr, 0);
char* errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
inline bool Regex::Init(const std::string &spec, std::string *error)
{
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0)
{
if (error)
{
size_t needed = regerror(ec, &re_, nullptr, 0);
char *errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
      // regerror returns the number of bytes necessary to null-terminate
      // the string, so we drop that terminator byte when assigning to error.
CHECK_NE(needed, 0);
error->assign(errbuf, needed - 1);
            // regerror returns the number of bytes necessary to null-terminate
            // the string, so we drop that terminator byte when assigning to error.
CHECK_NE(needed, 0);
error->assign(errbuf, needed - 1);
delete[] errbuf;
delete[] errbuf;
}
return false;
}
return false;
}
init_ = true;
return true;
init_ = true;
return true;
}
inline Regex::~Regex() {
if (init_) {
regfree(&re_);
}
inline Regex::~Regex()
{
if (init_)
{
regfree(&re_);
}
}
inline bool Regex::Match(const std::string& str) {
if (!init_) {
return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
inline bool Regex::Match(const std::string &str)
{
if (!init_)
{
return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
}
#endif
} // end namespace benchmark
} // end namespace benchmark
#endif // BENCHMARK_RE_H_
#endif // BENCHMARK_RE_H_
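
A short usage sketch for the Regex wrapper above (editor's illustration; it assumes the build defines one of the HAVE_STD_REGEX / HAVE_POSIX_REGEX backends and that the header is reachable as "re.h"): Init() compiles a POSIX-extended pattern and reports failure through the optional error string instead of throwing, after which Match() can be called repeatedly.

#include <iostream>
#include <string>

#include "re.h"  // assumed path of the header shown above

bool NameMatchesFilter(const std::string& name, const std::string& spec) {
  benchmark::Regex re;
  std::string error;
  if (!re.Init(spec, &error)) {
    std::cerr << "bad filter '" << spec << "': " << error << "\n";
    return false;
  }
  return re.Match(name);
}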

View File

@ -24,82 +24,97 @@
#include "check.h"
#include "string_util.h"
namespace benchmark {
namespace benchmark
{
BenchmarkReporter::BenchmarkReporter()
: output_stream_(&std::cout), error_stream_(&std::cerr) {}
BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr)
{
}
BenchmarkReporter::~BenchmarkReporter() {}
BenchmarkReporter::~BenchmarkReporter()
{
}
void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Context const &context) {
CHECK(out) << "cannot be null";
auto &Out = *out;
void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context)
{
CHECK(out) << "cannot be null";
auto &Out = *out;
Out << LocalDateTimeString() << "\n";
Out << LocalDateTimeString() << "\n";
if (context.executable_name)
Out << "Running " << context.executable_name << "\n";
if (context.executable_name)
Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X "
<< (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0) {
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches) {
Out << " L" << CInfo.level << " " << CInfo.type << " "
<< (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n";
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X " << (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0)
{
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches)
{
Out << " L" << CInfo.level << " " << CInfo.type << " " << (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n";
}
}
}
if (!info.load_avg.empty()) {
Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end()) Out << ", ";
if (!info.load_avg.empty())
{
Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();)
{
Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end())
Out << ", ";
}
Out << "\n";
}
Out << "\n";
}
if (info.scaling_enabled) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
if (info.scaling_enabled)
{
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
}
// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context()
: cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
std::string BenchmarkReporter::Run::benchmark_name() const {
std::string name = run_name.str();
if (run_type == RT_Aggregate) {
name += "_" + aggregate_name;
}
return name;
BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get())
{
}
double BenchmarkReporter::Run::GetAdjustedRealTime() const {
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
return new_time;
std::string BenchmarkReporter::Run::benchmark_name() const
{
std::string name = run_name.str();
if (run_type == RT_Aggregate)
{
name += "_" + aggregate_name;
}
return name;
}
double BenchmarkReporter::Run::GetAdjustedCPUTime() const {
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0) new_time /= static_cast<double>(iterations);
return new_time;
double BenchmarkReporter::Run::GetAdjustedRealTime() const
{
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
double BenchmarkReporter::Run::GetAdjustedCPUTime() const
{
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
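
To make GetAdjustedRealTime/GetAdjustedCPUTime above concrete, a worked example (editor's illustration; the 1e9 multiplier for a nanosecond time unit is an assumption about GetTimeUnitMultiplier, which is not shown in this diff):

#include <cstdio>

int main() {
  const double real_accumulated_time = 2.5;  // seconds, summed over all iterations
  const double iterations = 1e6;
  const double multiplier = 1e9;             // assumed multiplier for nanoseconds
  // 2.5 s * 1e9 / 1e6 iterations = 2500 ns reported per iteration.
  std::printf("%.0f ns/iter\n", real_accumulated_time * multiplier / iterations);
  return 0;
}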

View File

@ -24,28 +24,36 @@
#include <windows.h>
#endif
namespace benchmark {
namespace benchmark
{
#ifdef BENCHMARK_OS_WINDOWS
// Window's Sleep takes milliseconds argument.
void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); }
void SleepForSeconds(double seconds) {
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
void SleepForMilliseconds(int milliseconds)
{
Sleep(milliseconds);
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
void SleepForSeconds(double seconds)
{
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds)
{
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
}
void SleepForMilliseconds(int milliseconds) {
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
void SleepForMilliseconds(int milliseconds)
{
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}
void SleepForSeconds(double seconds) {
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
void SleepForSeconds(double seconds)
{
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif // BENCHMARK_OS_WINDOWS
} // end namespace benchmark
#endif // BENCHMARK_OS_WINDOWS
} // end namespace benchmark
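
A worked example of the microsecond-to-timespec decomposition used by SleepForMicroseconds above (editor's illustration using the same constants):

#include <cstdio>

int main() {
  const long long kNumMicrosPerSecond = 1000000;
  const long long kNumNanosPerMicro = 1000;
  const long long micros = 2345678;
  // 2345678 us -> tv_sec = 2, tv_nsec = 345678000.
  std::printf("tv_sec=%lld tv_nsec=%lld\n",
              micros / kNumMicrosPerSecond,
              (micros % kNumMicrosPerSecond) * kNumNanosPerMicro);
  return 0;
}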

View File

@ -1,7 +1,8 @@
#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_
namespace benchmark {
namespace benchmark
{
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
@ -10,6 +11,6 @@ const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
void SleepForMilliseconds(int milliseconds);
void SleepForSeconds(double seconds);
} // end namespace benchmark
} // end namespace benchmark
#endif // BENCHMARK_SLEEP_H_
#endif // BENCHMARK_SLEEP_H_

View File

@ -15,179 +15,195 @@
#include "benchmark/benchmark.h"
#include "check.h"
#include "statistics.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>
#include "check.h"
#include "statistics.h"
namespace benchmark {
namespace benchmark
{
auto StatisticsSum = [](const std::vector<double>& v) {
return std::accumulate(v.begin(), v.end(), 0.0);
};
auto StatisticsSum = [](const std::vector<double> &v) { return std::accumulate(v.begin(), v.end(), 0.0); };
double StatisticsMean(const std::vector<double>& v) {
if (v.empty()) return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
double StatisticsMean(const std::vector<double> &v)
{
if (v.empty())
return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
}
double StatisticsMedian(const std::vector<double>& v) {
if (v.size() < 3) return StatisticsMean(v);
std::vector<double> copy(v);
double StatisticsMedian(const std::vector<double> &v)
{
if (v.size() < 3)
return StatisticsMean(v);
std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end());
auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end());
// did we have an odd number of samples?
// if yes, then center is the median
  // if no, then we are looking for the average between center and the value
// before
if (v.size() % 2 == 1) return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
// did we have an odd number of samples?
// if yes, then center is the median
    // if no, then we are looking for the average between center and the value
// before
if (v.size() % 2 == 1)
return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
}
// Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double>& v) {
return std::inner_product(v.begin(), v.end(), v.begin(), 0.0);
};
auto SumSquares = [](const std::vector<double> &v) { return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); };
auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) {
// Avoid NaN due to imprecision in the calculations
if (dat < 0.0) return 0.0;
return std::sqrt(dat);
// Avoid NaN due to imprecision in the calculations
if (dat < 0.0)
return 0.0;
return std::sqrt(dat);
};
double StatisticsStdDev(const std::vector<double>& v) {
const auto mean = StatisticsMean(v);
if (v.empty()) return mean;
double StatisticsStdDev(const std::vector<double> &v)
{
const auto mean = StatisticsMean(v);
if (v.empty())
return mean;
// Sample standard deviation is undefined for n = 1
if (v.size() == 1) return 0.0;
// Sample standard deviation is undefined for n = 1
if (v.size() == 1)
return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
}
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
auto error_count =
std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.error_occurred; });
auto error_count = std::count_if(reports.begin(), reports.end(), [](Run const &run) { return run.error_occurred; });
if (reports.size() - error_count < 2)
{
// We don't report aggregated data if there was a single run.
return results;
}
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat
{
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const &r : reports)
{
for (auto const &cnt : r.counters)
{
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end())
{
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
}
else
{
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const &run : reports)
{
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred)
continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const &cnt : run.counters)
{
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
}
}
// Only add label if it is same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++)
{
if (reports[i].report_label != report_label)
{
report_label = "";
break;
}
}
const double iteration_rescale_factor = double(reports.size()) / double(run_iterations);
for (const auto &Stat : *reports[0].statistics)
{
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
data.threads = reports[0].threads;
data.repetitions = reports[0].repetitions;
data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
// It is incorrect to say that an aggregate is computed over
// run's iterations, because those iterations already got averaged.
// Similarly, if there are N repetitions with 1 iterations each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
// We will divide these times by data.iterations when reporting, but the
    // data.iterations is not necessarily the scale of these measurements,
    // because in each repetition, these timers are summed over all the iterations.
// And if we want to say that the stats are over N repetitions and not
// M iterations, we need to multiply these by (N/M).
data.real_accumulated_time *= iteration_rescale_factor;
data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const &kv : counter_stats)
{
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
results.push_back(data);
}
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
return results;
}
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat {
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const& r : reports) {
for (auto const& cnt : r.counters) {
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end()) {
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
} else {
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred) continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const& cnt : run.counters) {
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
}
}
// Only add label if it is same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++) {
if (reports[i].report_label != report_label) {
report_label = "";
break;
}
}
const double iteration_rescale_factor =
double(reports.size()) / double(run_iterations);
for (const auto& Stat : *reports[0].statistics) {
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
data.threads = reports[0].threads;
data.repetitions = reports[0].repetitions;
data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
// It is incorrect to say that an aggregate is computed over
// run's iterations, because those iterations already got averaged.
// Similarly, if there are N repetitions with 1 iterations each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
// We will divide these times by data.iterations when reporting, but the
        // data.iterations is not necessarily the scale of these measurements,
        // because in each repetition, these timers are summed over all the iterations.
// And if we want to say that the stats are over N repetitions and not
// M iterations, we need to multiply these by (N/M).
data.real_accumulated_time *= iteration_rescale_factor;
data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const& kv : counter_stats) {
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
results.push_back(data);
}
return results;
}
} // end namespace benchmark
} // end namespace benchmark
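
A worked example of the iteration rescaling above (editor's illustration): with 3 repetitions of 1000 iterations each, ComputeStats stores data.iterations = 3 (the number of reports), so the aggregated accumulated times are multiplied by 3/1000 to stay consistent with the later divide-by-iterations step in GetAdjustedRealTime/GetAdjustedCPUTime.

#include <cstdio>

int main() {
  const double repetitions = 3.0;        // number of reports being aggregated
  const double run_iterations = 1000.0;  // iterations inside each repetition
  // e.g. the mean of the per-repetition accumulated CPU times:
  const double aggregated_accumulated_time = 4.2;  // seconds
  const double rescale = repetitions / run_iterations;
  std::printf("stored accumulated time: %.6f s\n",
              aggregated_accumulated_time * rescale);
  return 0;
}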

View File

@ -20,18 +20,18 @@
#include "benchmark/benchmark.h"
namespace benchmark {
namespace benchmark
{
// Return a vector containing the mean, median and standard deviation information
// (and any user-specified info) for the specified list of reports. If 'reports'
// contains fewer than two non-errored runs, an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeStats(
const std::vector<BenchmarkReporter::Run>& reports);
std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports);
double StatisticsMean(const std::vector<double>& v);
double StatisticsMedian(const std::vector<double>& v);
double StatisticsStdDev(const std::vector<double>& v);
double StatisticsMean(const std::vector<double> &v);
double StatisticsMedian(const std::vector<double> &v);
double StatisticsStdDev(const std::vector<double> &v);
} // end namespace benchmark
} // end namespace benchmark
#endif // STATISTICS_H_
#endif // STATISTICS_H_
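
A minimal usage sketch for the free statistics helpers declared above (editor's illustration; the include path is an assumption):

#include <cstdio>
#include <vector>

#include "statistics.h"  // assumed path of the header shown above

int main() {
  std::vector<double> samples{2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0};
  // mean = 5.00, median = 4.50, sample stddev = sqrt(32/7) ~= 2.14
  std::printf("mean=%.2f median=%.2f stddev=%.2f\n",
              benchmark::StatisticsMean(samples),
              benchmark::StatisticsMedian(samples),
              benchmark::StatisticsStdDev(samples));
  return 0;
}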

View File

@ -12,8 +12,10 @@
#include "arraysize.h"
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY";
@ -23,144 +25,159 @@ const char kBigIECUnits[] = "KMGTPEZY";
const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits),
"SI and IEC unit arrays must be the same size");
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), "SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, double thresh, int precision,
double one_k, std::string* mantissa,
int64_t* exponent) {
std::stringstream mantissa_stream;
void ToExponentAndMantissa(double val, double thresh, int precision, double one_k, std::string *mantissa,
int64_t *exponent)
{
std::stringstream mantissa_stream;
if (val < 0) {
mantissa_stream << "-";
val = -val;
}
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold =
std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in ]simple_threshold,small_threshold[ will be printed as-is
const double simple_threshold = 0.01;
if (val > big_threshold) {
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i) {
scaled /= one_k;
if (scaled <= big_threshold) {
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
if (val < 0)
{
mantissa_stream << "-";
val = -val;
}
mantissa_stream << val;
*exponent = 0;
} else if (val < small_threshold) {
// Negative powers
if (val < simple_threshold) {
double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) {
scaled *= one_k;
if (scaled >= small_threshold) {
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold = std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in ]simple_threshold,small_threshold[ will be printed as-is
const double simple_threshold = 0.01;
if (val > big_threshold)
{
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i)
{
scaled /= one_k;
if (scaled <= big_threshold)
{
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
}
}
mantissa_stream << val;
*exponent = 0;
}
mantissa_stream << val;
*exponent = 0;
} else {
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
else if (val < small_threshold)
{
// Negative powers
if (val < simple_threshold)
{
double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i)
{
scaled *= one_k;
if (scaled >= small_threshold)
{
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
}
}
}
mantissa_stream << val;
*exponent = 0;
}
else
{
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
}
std::string ExponentToPrefix(int64_t exponent, bool iec) {
if (exponent == 0) return "";
std::string ExponentToPrefix(int64_t exponent, bool iec)
{
if (exponent == 0)
return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize) return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize)
return "";
const char* array =
(exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec)
return array[index] + std::string("i");
else
return std::string(1, array[index]);
const char *array = (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec)
return array[index] + std::string("i");
else
return std::string(1, array[index]);
}
std::string ToBinaryStringFullySpecified(double value, double threshold,
int precision, double one_k = 1024.0) {
std::string mantissa;
int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa,
&exponent);
return mantissa + ExponentToPrefix(exponent, false);
std::string ToBinaryStringFullySpecified(double value, double threshold, int precision, double one_k = 1024.0)
{
std::string mantissa;
int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, &exponent);
return mantissa + ExponentToPrefix(exponent, false);
}
} // end namespace
} // end namespace
void AppendHumanReadable(int n, std::string* str) {
std::stringstream ss;
// Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
void AppendHumanReadable(int n, std::string *str)
{
std::stringstream ss;
// Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
}
std::string HumanReadableNumber(double n, double one_k) {
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
std::string HumanReadableNumber(double n, double one_k)
{
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}
std::string StrFormatImp(const char* msg, va_list args) {
  // we might need a second shot at this, so pre-emptively make a copy
va_list args_cp;
va_copy(args_cp, args);
std::string StrFormatImp(const char *msg, va_list args)
{
    // we might need a second shot at this, so pre-emptively make a copy
va_list args_cp;
va_copy(args_cp, args);
// TODO(ericwf): use std::array for first attempt to avoid one memory
  // allocation; guess what the size might be
std::array<char, 256> local_buff;
std::size_t size = local_buff.size();
  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
// TODO(ericwf): use std::array for first attempt to avoid one memory
    // allocation; guess what the size might be
std::array<char, 256> local_buff;
std::size_t size = local_buff.size();
    // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
va_end(args_cp);
va_end(args_cp);
// handle empty expansion
if (ret == 0) return std::string{};
if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
// handle empty expansion
if (ret == 0)
return std::string{};
if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
// we did not provide a long enough buffer on our first attempt.
  // add 1 to size to account for the null byte; cast ret first to prevent overflow
size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
  // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
ret = vsnprintf(buff_ptr.get(), size, msg, args);
return std::string(buff_ptr.get());
// we did not provide a long enough buffer on our first attempt.
    // add 1 to size to account for the null byte; cast ret first to prevent overflow
size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
    // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
ret = vsnprintf(buff_ptr.get(), size, msg, args);
return std::string(buff_ptr.get());
}
std::string StrFormat(const char* format, ...) {
va_list args;
va_start(args, format);
std::string tmp = StrFormatImp(format, args);
va_end(args);
return tmp;
std::string StrFormat(const char *format, ...)
{
va_list args;
va_start(args, format);
std::string tmp = StrFormatImp(format, args);
va_end(args);
return tmp;
}
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
@ -170,86 +187,95 @@ std::string StrFormat(const char* format, ...) {
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos, int base) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
unsigned long stoul(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base);
const char *strStart = str.c_str();
char *strEnd = const_cast<char *>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base);
const int strtoulErrno = errno;
/* Restore previous errno */
errno = oldErrno;
const int strtoulErrno = errno;
/* Restore previous errno */
errno = oldErrno;
/* Check for errors and return */
if (strtoulErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of unsigned long");
} else if (strEnd == strStart || strtoulErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
/* Check for errors and return */
if (strtoulErrno == ERANGE)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of unsigned long");
}
else if (strEnd == strStart || strtoulErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
int stoi(const std::string& str, size_t* pos, int base) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
int stoi(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const long result = strtol(strStart, &strEnd, base);
const char *strStart = str.c_str();
char *strEnd = const_cast<char *>(strStart);
const long result = strtol(strStart, &strEnd, base);
const int strtolErrno = errno;
/* Restore previous errno */
errno = oldErrno;
const int strtolErrno = errno;
/* Restore previous errno */
errno = oldErrno;
/* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtolErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
/* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
}
else if (strEnd == strStart || strtolErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
}
double stod(const std::string& str, size_t* pos) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
double stod(const std::string &str, size_t *pos)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const double result = strtod(strStart, &strEnd);
const char *strStart = str.c_str();
char *strEnd = const_cast<char *>(strStart);
const double result = strtod(strStart, &strEnd);
/* Restore previous errno */
const int strtodErrno = errno;
errno = oldErrno;
/* Restore previous errno */
const int strtodErrno = errno;
errno = oldErrno;
/* Check for errors and return */
if (strtodErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtodErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
/* Check for errors and return */
if (strtodErrno == ERANGE)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
}
else if (strEnd == strStart || strtodErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
#endif
} // end namespace benchmark
} // end namespace benchmark
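
A short usage sketch for HumanReadableNumber above (editor's illustration; the include path is an assumption). With threshold 1.1 and one decimal digit, values just past a unit boundary stay in the lower unit, and the one_k argument switches between 1024-based and 1000-based scaling:

#include <iostream>

#include "string_util.h"  // assumed path of the header shown above

int main() {
  std::cout << benchmark::HumanReadableNumber(1536.0, 1024.0) << "\n";     // 1.5k
  std::cout << benchmark::HumanReadableNumber(2500000.0, 1000.0) << "\n";  // 2.5M
  return 0;
}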

View File

@ -1,14 +1,15 @@
#ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_
#include "internal_macros.h"
#include <sstream>
#include <string>
#include <utility>
#include "internal_macros.h"
namespace benchmark {
namespace benchmark
{
void AppendHumanReadable(int n, std::string* str);
void AppendHumanReadable(int n, std::string *str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
@ -18,23 +19,24 @@ __attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
__attribute__((format(printf, 1, 2)))
#endif
std::string
StrFormat(const char* format, ...);
StrFormat(const char *format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
return out;
inline std::ostream &StrCatImp(std::ostream &out) BENCHMARK_NOEXCEPT
{
return out;
}
template <class First, class... Rest>
inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...);
template <class First, class... Rest> inline std::ostream &StrCatImp(std::ostream &out, First &&f, Rest &&...rest)
{
out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...);
}
template <class... Args>
inline std::string StrCat(Args&&... args) {
std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...);
return ss.str();
template <class... Args> inline std::string StrCat(Args &&...args)
{
std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...);
return ss.str();
}
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
@ -44,16 +46,15 @@ inline std::string StrCat(Args&&... args) {
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos = nullptr,
int base = 10);
int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
double stod(const std::string& str, size_t* pos = nullptr);
unsigned long stoul(const std::string &str, size_t *pos = nullptr, int base = 10);
int stoi(const std::string &str, size_t *pos = nullptr, int base = 10);
double stod(const std::string &str, size_t *pos = nullptr);
#else
using std::stoul;
using std::stoi;
using std::stod;
using std::stoi;
using std::stoul;
#endif
} // end namespace benchmark
} // end namespace benchmark
#endif // BENCHMARK_STRING_UTIL_H_
#endif // BENCHMARK_STRING_UTIL_H_
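
A minimal usage sketch for the variadic StrCat above (editor's illustration; the include path is an assumption). Each argument is streamed into a single ostringstream by the recursive StrCatImp, so anything with an operator<< works:

#include <iostream>
#include <string>

#include "string_util.h"  // assumed path of the header shown above

int main() {
  std::string msg = benchmark::StrCat("ran ", 42, " iterations in ", 1.25, "s");
  std::cout << msg << "\n";  // ran 42 iterations in 1.25s
  return 0;
}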

View File

@ -6,59 +6,68 @@
#include "benchmark/benchmark.h"
#include "mutex.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
class ThreadManager {
public:
explicit ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}
Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
class ThreadManager
{
public:
explicit ThreadManager(int num_threads) : alive_threads_(num_threads), start_stop_barrier_(num_threads)
{
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
}
Mutex &GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_)
{
return benchmark_mutex_;
}
public:
struct Result {
IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
bool StartStopBarrier() EXCLUDES(end_cond_mutex_)
{
return start_stop_barrier_.wait();
}
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_)
{
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0)
{
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_)
{
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(), [this]() { return alive_threads_ == 0; });
}
public:
struct Result
{
IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};
} // namespace internal
} // namespace benchmark
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_MANAGER_H
#endif // BENCHMARK_THREAD_MANAGER_H
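As an aside, a hedged illustration of the coordination pattern ThreadManager provides — rewritten with std:: primitives instead of the library's internal Mutex/Barrier/Condition types, so SimpleThreadManager and its members are illustrative names, not the library's API: each worker decrements a live-thread count when it finishes, and the main thread blocks until that count reaches zero.

#include <atomic>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

class SimpleThreadManager {
 public:
  explicit SimpleThreadManager(int num_threads) : alive_threads_(num_threads) {}

  // Called by each worker when it finishes its benchmark loop.
  void NotifyThreadComplete() {
    if (--alive_threads_ == 0) {
      std::lock_guard<std::mutex> lock(end_cond_mutex_);
      end_condition_.notify_all();
    }
  }

  // Called by the main thread to block until every worker has finished.
  void WaitForAllThreads() {
    std::unique_lock<std::mutex> lock(end_cond_mutex_);
    end_condition_.wait(lock, [this] { return alive_threads_ == 0; });
  }

 private:
  std::atomic<int> alive_threads_;
  std::mutex end_cond_mutex_;
  std::condition_variable end_condition_;
};

int main() {
  constexpr int kThreads = 4;
  SimpleThreadManager manager(kThreads);
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; ++i) {
    workers.emplace_back([&manager] {
      // ... per-thread benchmark body would run here ...
      manager.NotifyThreadComplete();
    });
  }
  manager.WaitForAllThreads();
  for (auto& t : workers) t.join();
  std::cout << "all workers finished\n";
  return 0;
}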

View File

@ -4,83 +4,101 @@
#include "check.h"
#include "timers.h"
namespace benchmark {
namespace internal {
namespace benchmark
{
namespace internal
{
class ThreadTimer {
explicit ThreadTimer(bool measure_process_cpu_time_)
: measure_process_cpu_time(measure_process_cpu_time_) {}
class ThreadTimer
{
explicit ThreadTimer(bool measure_process_cpu_time_) : measure_process_cpu_time(measure_process_cpu_time_)
{
}
public:
static ThreadTimer Create() {
return ThreadTimer(/*measure_process_cpu_time_=*/false);
}
static ThreadTimer CreateProcessCpuTime() {
return ThreadTimer(/*measure_process_cpu_time_=*/true);
}
public:
static ThreadTimer Create()
{
return ThreadTimer(/*measure_process_cpu_time_=*/false);
}
static ThreadTimer CreateProcessCpuTime()
{
return ThreadTimer(/*measure_process_cpu_time_=*/true);
}
// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread
void StartTimer()
{
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread
void StopTimer() {
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ +=
std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
}
// Called by each thread
void StopTimer()
{
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ += std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
}
// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }
// Called by each thread
void SetIterationTime(double seconds)
{
manual_time_used_ += seconds;
}
bool running() const { return running_; }
bool running() const
{
return running_;
}
// REQUIRES: timer is not running
double real_time_used() const {
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double real_time_used() const
{
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() const {
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() const
{
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() const {
CHECK(!running_);
return manual_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() const
{
CHECK(!running_);
return manual_time_used_;
}
private:
double ReadCpuTimerOfChoice() const {
if (measure_process_cpu_time) return ProcessCPUUsage();
return ThreadCPUUsage();
}
private:
double ReadCpuTimerOfChoice() const
{
if (measure_process_cpu_time)
return ProcessCPUUsage();
return ThreadCPUUsage();
}
// should the thread, or the process, time be measured?
const bool measure_process_cpu_time;
// should the thread, or the process, time be measured?
const bool measure_process_cpu_time;
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};
} // namespace internal
} // namespace benchmark
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_TIMER_H
#endif // BENCHMARK_THREAD_TIMER_H
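A small self-contained sketch of the start/stop accumulation scheme ThreadTimer uses, assuming only the wall-clock half matters for illustration — SliceTimer and its members are made-up names, not library API: while running, remember the start point; on stop, add the elapsed slice to a running total, guarding against a negative slice as the original does for the CPU-time subtraction.

#include <algorithm>
#include <cassert>
#include <chrono>
#include <iostream>
#include <thread>

class SliceTimer {
 public:
  void StartTimer() {
    assert(!running_);
    running_ = true;
    start_ = std::chrono::steady_clock::now();
  }

  void StopTimer() {
    assert(running_);
    running_ = false;
    std::chrono::duration<double> slice = std::chrono::steady_clock::now() - start_;
    // Clamp to zero so a pathological slice cannot drive the total negative.
    real_time_used_ += std::max(slice.count(), 0.0);
  }

  double real_time_used() const {
    assert(!running_);
    return real_time_used_;
  }

 private:
  bool running_ = false;
  std::chrono::steady_clock::time_point start_;
  double real_time_used_ = 0;  // accumulated seconds; excludes the current slice
};

int main() {
  SliceTimer timer;
  for (int i = 0; i < 3; ++i) {
    timer.StartTimer();
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    timer.StopTimer();  // only the timed slices are accumulated
  }
  std::cout << "timed " << timer.real_time_used() << " s across 3 slices\n";
  return 0;
}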

View File

@ -17,7 +17,7 @@
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#else
@ -26,7 +26,7 @@
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
@ -57,161 +57,172 @@
#include "sleep.h"
#include "string_util.h"
namespace benchmark {
namespace benchmark
{
// Suppress unused warnings on helper functions.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
namespace {
namespace
{
#if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
ULARGE_INTEGER kernel;
ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) +
static_cast<double>(user.QuadPart)) *
1e-7;
double MakeTime(FILETIME const &kernel_time, FILETIME const &user_time)
{
ULARGE_INTEGER kernel;
ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) + static_cast<double>(user.QuadPart)) * 1e-7;
}
#elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) +
static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
double MakeTime(struct rusage const &ru)
{
return (static_cast<double>(ru.ru_utime.tv_sec) + static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}
#endif
#if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const& info) {
return (static_cast<double>(info.user_time.seconds) +
static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) +
static_cast<double>(info.system_time.microseconds) * 1e-6);
double MakeTime(thread_basic_info_data_t const &info)
{
return (static_cast<double>(info.user_time.seconds) + static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.microseconds) * 1e-6);
}
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const& ts) {
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
double MakeTime(struct timespec const &ts)
{
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) {
std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
BENCHMARK_NORETURN static void DiagnoseAndExit(const char *msg)
{
std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
}
} // end namespace
} // end namespace
double ProcessCPUUsage() {
double ProcessCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time,
&user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
HANDLE proc = GetCurrentProcess();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, &user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency
  // synchronous system calls in Emscripten.
return emscripten_get_now() * 1e-3;
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency
    // synchronous system calls in Emscripten.
return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
  // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292
struct timespec spec;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
return MakeTime(spec);
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
    // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292
struct timespec spec;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
return MakeTime(spec);
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else
struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif
}
double ThreadCPUUsage() {
double ThreadCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time,
&user_time);
return MakeTime(kernel_time, user_time);
HANDLE this_thread = GetCurrentThread();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, &user_time);
return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX)
  // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
KERN_SUCCESS) {
return MakeTime(info);
}
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
    // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See
// https://github.com/google/benchmark/pull/292
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == KERN_SUCCESS)
{
return MakeTime(info);
}
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// Emscripten doesn't support traditional threads
return ProcessCPUUsage();
// Emscripten doesn't support traditional threads
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_RTEMS)
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else
#error Per-thread timing is not available on your system.
#endif
}
namespace {
namespace
{
std::string DateTimeString(bool local) {
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kStorageSize = 128;
char storage[kStorageSize];
std::size_t written;
std::string DateTimeString(bool local)
{
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kStorageSize = 128;
char storage[kStorageSize];
std::size_t written;
if (local) {
if (local)
{
#if defined(BENCHMARK_OS_WINDOWS)
written =
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
written = std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
std::tm timeinfo;
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
std::tm timeinfo;
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
} else {
}
else
{
#if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
std::tm timeinfo;
::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
std::tm timeinfo;
::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
}
CHECK(written < kStorageSize);
  ((void)written); // prevent an unused-variable warning in optimized mode.
return std::string(storage);
}
CHECK(written < kStorageSize);
    ((void)written); // prevent an unused-variable warning in optimized mode.
return std::string(storage);
}
} // end namespace
} // end namespace
std::string LocalDateTimeString() { return DateTimeString(true); }
std::string LocalDateTimeString()
{
return DateTimeString(true);
}
} // end namespace benchmark
} // end namespace benchmark

View File

@ -4,7 +4,8 @@
#include <chrono>
#include <string>
namespace benchmark {
namespace benchmark
{
// Return the CPU usage of the current process
double ProcessCPUUsage();
@ -16,33 +17,35 @@ double ChildrenCPUUsage();
double ThreadCPUUsage();
#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
struct ChooseSteadyClock {
typedef std::chrono::high_resolution_clock type;
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady> struct ChooseSteadyClock
{
typedef std::chrono::high_resolution_clock type;
};
template <>
struct ChooseSteadyClock<false> {
typedef std::chrono::steady_clock type;
template <> struct ChooseSteadyClock<false>
{
typedef std::chrono::steady_clock type;
};
#endif
struct ChooseClockType {
struct ChooseClockType
{
#if defined(HAVE_STEADY_CLOCK)
typedef ChooseSteadyClock<>::type type;
typedef ChooseSteadyClock<>::type type;
#else
typedef std::chrono::high_resolution_clock type;
typedef std::chrono::high_resolution_clock type;
#endif
};
inline double ChronoClockNow() {
typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count();
inline double ChronoClockNow()
{
typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count();
}
std::string LocalDateTimeString();
} // end namespace benchmark
} // end namespace benchmark
#endif // BENCHMARK_TIMERS_H
#endif // BENCHMARK_TIMERS_H
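ChronoClockNow() in the header above returns the chosen clock as seconds in a double, which makes elapsed time a plain floating-point subtraction. A minimal standalone restatement, assuming a steady clock is available (again an illustration, not the library itself):

#include <chrono>
#include <cmath>
#include <iostream>

// Current time of the steady clock, expressed as seconds in a double.
inline double ChronoClockNow() {
  using ClockType = std::chrono::steady_clock;
  using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
  return FpSeconds(ClockType::now().time_since_epoch()).count();
}

int main() {
  const double start = ChronoClockNow();
  double sink = 0;
  for (int i = 0; i < 1000000; ++i) sink += std::sqrt(static_cast<double>(i));
  const double elapsed = ChronoClockNow() - start;
  std::cout << "busy loop took " << elapsed << " s (sink=" << sink << ")\n";
  return 0;
}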

View File

@ -3,134 +3,165 @@
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
for (auto _ : state) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
void BM_spin_empty(benchmark::State &state)
{
for (auto _ : state)
{
for (int x = 0; x < state.range(0); ++x)
{
benchmark::DoNotOptimize(x);
}
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
void BM_spin_pause_before(benchmark::State &state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
for (auto _ : state) {
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
void BM_spin_pause_during(benchmark::State &state)
{
for (auto _ : state)
{
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
for (auto _ : state) {
state.PauseTiming();
state.ResumeTiming();
}
void BM_pause_during(benchmark::State &state)
{
for (auto _ : state)
{
state.PauseTiming();
state.ResumeTiming();
}
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
void BM_spin_pause_after(benchmark::State &state)
{
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
void BM_spin_pause_before_and_after(benchmark::State &state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
for (auto _ : state) {
}
void BM_empty_stop_start(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
void BM_KeepRunning(benchmark::State& state) {
benchmark::IterationCount iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning()) {
++iter_count;
}
assert(iter_count == state.iterations());
void BM_KeepRunning(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning())
{
++iter_count;
}
assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);
void BM_KeepRunningBatch(benchmark::State& state) {
// Choose a prime batch size to avoid evenly dividing max_iterations.
const benchmark::IterationCount batch_size = 101;
benchmark::IterationCount iter_count = 0;
while (state.KeepRunningBatch(batch_size)) {
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
void BM_KeepRunningBatch(benchmark::State &state)
{
// Choose a prime batch size to avoid evenly dividing max_iterations.
const benchmark::IterationCount batch_size = 101;
benchmark::IterationCount iter_count = 0;
while (state.KeepRunningBatch(batch_size))
{
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
}
BENCHMARK(BM_KeepRunningBatch);
void BM_RangedFor(benchmark::State& state) {
benchmark::IterationCount iter_count = 0;
for (auto _ : state) {
++iter_count;
}
assert(iter_count == state.max_iterations);
void BM_RangedFor(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0;
for (auto _ : state)
{
++iter_count;
}
assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);
// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<
typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value, "");
static_assert(std::is_same<typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value,
"");
BENCHMARK_MAIN();

View File

@ -4,125 +4,141 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace benchmark {
namespace internal {
namespace {
namespace benchmark
{
namespace internal
{
namespace
{
TEST(AddRangeTest, Simple) {
std::vector<int> dst;
AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
TEST(AddRangeTest, Simple)
{
std::vector<int> dst;
AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Simple64) {
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
TEST(AddRangeTest, Simple64)
{
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Advanced) {
std::vector<int> dst;
AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
TEST(AddRangeTest, Advanced)
{
std::vector<int> dst;
AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, Advanced64) {
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
TEST(AddRangeTest, Advanced64)
{
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, FullRange8) {
std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
TEST(AddRangeTest, FullRange8)
{
std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
}
TEST(AddRangeTest, FullRange64) {
std::vector<int64_t> dst;
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
EXPECT_THAT(
dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
1099511627776LL, 1125899906842624LL,
1152921504606846976LL, 9223372036854775807LL));
TEST(AddRangeTest, FullRange64)
{
std::vector<int64_t> dst;
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
EXPECT_THAT(dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1152921504606846976LL, 9223372036854775807LL));
}
TEST(AddRangeTest, NegativeRanges) {
std::vector<int> dst;
AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
TEST(AddRangeTest, NegativeRanges)
{
std::vector<int> dst;
AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
}
TEST(AddRangeTest, StrictlyNegative) {
std::vector<int> dst;
AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
TEST(AddRangeTest, StrictlyNegative)
{
std::vector<int> dst;
AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
}
TEST(AddRangeTest, SymmetricNegativeRanges) {
std::vector<int> dst;
AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
TEST(AddRangeTest, SymmetricNegativeRanges)
{
std::vector<int> dst;
AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
}
TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
std::vector<int> dst;
AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
TEST(AddRangeTest, SymmetricNegativeRangesOddMult)
{
std::vector<int> dst;
AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
}
TEST(AddRangeTest, NegativeRangesAsymmetric) {
std::vector<int> dst;
AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
TEST(AddRangeTest, NegativeRangesAsymmetric)
{
std::vector<int> dst;
AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
}
TEST(AddRangeTest, NegativeRangesLargeStep) {
// Always include -1, 0, 1 when crossing zero.
std::vector<int> dst;
AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
TEST(AddRangeTest, NegativeRangesLargeStep)
{
// Always include -1, 0, 1 when crossing zero.
std::vector<int> dst;
AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
}
TEST(AddRangeTest, ZeroOnlyRange) {
std::vector<int> dst;
AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0));
TEST(AddRangeTest, ZeroOnlyRange)
{
std::vector<int> dst;
AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0));
}
TEST(AddRangeTest, NegativeRange64) {
std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
TEST(AddRangeTest, NegativeRange64)
{
std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
}
TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
// If elements already exist in the range, ensure we don't change
// their ordering by adding negative values.
std::vector<int64_t> dst = {1, 2, 3};
AddRange<int64_t>(&dst, -2, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
TEST(AddRangeTest, NegativeRangePreservesExistingOrder)
{
// If elements already exist in the range, ensure we don't change
// their ordering by adding negative values.
std::vector<int64_t> dst = {1, 2, 3};
AddRange<int64_t>(&dst, -2, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
}
TEST(AddRangeTest, FullNegativeRange64) {
std::vector<int64_t> dst;
const auto min = std::numeric_limits<int64_t>::min();
const auto max = std::numeric_limits<int64_t>::max();
AddRange(&dst, min, max, 1024);
EXPECT_THAT(
dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL,
-1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL,
1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL,
1125899906842624LL, 1152921504606846976LL, max}));
TEST(AddRangeTest, FullNegativeRange64)
{
std::vector<int64_t> dst;
const auto min = std::numeric_limits<int64_t>::min();
const auto max = std::numeric_limits<int64_t>::max();
AddRange(&dst, min, max, 1024);
EXPECT_THAT(dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL, -1099511627776LL, -1073741824LL, -1048576LL,
-1024LL, -1LL, 0LL, 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1152921504606846976LL, max}));
}
TEST(AddRangeTest, Simple8) {
std::vector<int8_t> dst;
AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
TEST(AddRangeTest, Simple8)
{
std::vector<int8_t> dst;
AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
}
} // namespace
} // namespace internal
} // namespace benchmark
} // namespace
} // namespace internal
} // namespace benchmark

View File

@ -1,74 +1,84 @@
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
namespace {
namespace
{
using namespace benchmark;
using namespace benchmark::internal;
TEST(BenchmarkNameTest, Empty) {
const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string());
TEST(BenchmarkNameTest, Empty)
{
const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string());
}
TEST(BenchmarkNameTest, FunctionName) {
auto name = BenchmarkName();
name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name");
TEST(BenchmarkNameTest, FunctionName)
{
auto name = BenchmarkName();
name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name");
}
TEST(BenchmarkNameTest, FunctionNameAndArgs) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
TEST(BenchmarkNameTest, FunctionNameAndArgs)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
}
TEST(BenchmarkNameTest, MinTime) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4";
name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
TEST(BenchmarkNameTest, MinTime)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4";
name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
}
TEST(BenchmarkNameTest, Iterations) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.iterations = "iterations:42";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
TEST(BenchmarkNameTest, Iterations)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.iterations = "iterations:42";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
}
TEST(BenchmarkNameTest, Repetitions) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.repetitions = "repetitions:24";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
TEST(BenchmarkNameTest, Repetitions)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.repetitions = "repetitions:24";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
}
TEST(BenchmarkNameTest, TimeType) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.time_type = "hammer_time";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
TEST(BenchmarkNameTest, TimeType)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.time_type = "hammer_time";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
}
TEST(BenchmarkNameTest, Threads) {
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.threads = "threads:256";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
TEST(BenchmarkNameTest, Threads)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.threads = "threads:256";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
}
TEST(BenchmarkNameTest, TestEmptyFunctionName) {
auto name = BenchmarkName();
name.args = "first:3/second:4";
name.threads = "threads:22";
EXPECT_EQ(name.str(), "first:3/second:4/threads:22");
TEST(BenchmarkNameTest, TestEmptyFunctionName)
{
auto name = BenchmarkName();
name.args = "first:3/second:4";
name.threads = "threads:22";
EXPECT_EQ(name.str(), "first:3/second:4/threads:22");
}
} // end namespace
} // end namespace

View File

@ -24,219 +24,252 @@
#define BENCHMARK_NOINLINE
#endif
namespace {
namespace
{
int BENCHMARK_NOINLINE Factorial(uint32_t n) {
return (n == 1) ? 1 : n * Factorial(n - 1);
int BENCHMARK_NOINLINE Factorial(uint32_t n)
{
return (n == 1) ? 1 : n * Factorial(n - 1);
}
double CalculatePi(int depth) {
double pi = 0.0;
for (int i = 0; i < depth; ++i) {
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
}
return (pi - 1.0) * 4;
double CalculatePi(int depth)
{
double pi = 0.0;
for (int i = 0; i < depth; ++i)
{
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
}
return (pi - 1.0) * 4;
}
std::set<int64_t> ConstructRandomSet(int64_t size) {
std::set<int64_t> s;
for (int i = 0; i < size; ++i) s.insert(s.end(), i);
return s;
std::set<int64_t> ConstructRandomSet(int64_t size)
{
std::set<int64_t> s;
for (int i = 0; i < size; ++i)
s.insert(s.end(), i);
return s;
}
std::mutex test_vector_mu;
std::vector<int>* test_vector = nullptr;
std::vector<int> *test_vector = nullptr;
} // end namespace
} // end namespace
static void BM_Factorial(benchmark::State& state) {
int fac_42 = 0;
for (auto _ : state) fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
state.SetLabel(ss.str());
static void BM_Factorial(benchmark::State &state)
{
int fac_42 = 0;
for (auto _ : state)
fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
state.SetLabel(ss.str());
}
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
static void BM_CalculatePiRange(benchmark::State &state)
{
double pi = 0.0;
for (auto _ : state)
pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
for (auto _ : state) {
benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
}
static void BM_CalculatePi(benchmark::State &state)
{
static const int depth = 1024;
for (auto _ : state)
{
benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
std::set<int64_t> data;
for (auto _ : state) {
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j) data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
static void BM_SetInsert(benchmark::State &state)
{
std::set<int64_t> data;
for (auto _ : state)
{
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
// non-timed part of each iteration will make the benchmark take forever.
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
for (auto _ : state) {
Container c;
for (int64_t i = state.range(0); --i;) c.push_back(v);
}
const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
template <typename Container, typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State &state)
{
ValueType v = 42;
for (auto _ : state)
{
Container c;
for (int64_t i = state.range(0); --i;)
c.push_back(v);
}
const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)
->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State& state) {
size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-');
std::string s2(len, '-');
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
static void BM_StringCompare(benchmark::State &state)
{
size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-');
std::string s2(len, '-');
for (auto _ : state)
benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
static void BM_SetupTeardown(benchmark::State& state) {
if (state.thread_index == 0) {
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
for (auto _ : state) {
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
else
test_vector->pop_back();
++i;
}
if (state.thread_index == 0) {
delete test_vector;
}
static void BM_SetupTeardown(benchmark::State &state)
{
if (state.thread_index == 0)
{
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
for (auto _ : state)
{
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
else
test_vector->pop_back();
++i;
}
if (state.thread_index == 0)
{
delete test_vector;
}
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
static void BM_LongTest(benchmark::State &state)
{
double tracker = 0.0;
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) {
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
static void BM_ParallelMemset(benchmark::State &state)
{
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0) {
test_vector = new std::vector<int>(static_cast<size_t>(size));
}
for (auto _ : state) {
for (int i = from; i < to; i++) {
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
if (state.thread_index == 0)
{
test_vector = new std::vector<int>(static_cast<size_t>(size));
}
}
if (state.thread_index == 0) {
delete test_vector;
}
for (auto _ : state)
{
for (int i = from; i < to; i++)
{
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
}
}
if (state.thread_index == 0)
{
delete test_vector;
}
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) {
int64_t slept_for = 0;
int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
static void BM_ManualTiming(benchmark::State &state)
{
int64_t slept_for = 0;
int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{static_cast<double>(microseconds)};
for (auto _ : state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
for (auto _ : state)
{
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
auto elapsed =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
}
state.SetItemsProcessed(slept_for);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
}
state.SetItemsProcessed(slept_for);
}
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#ifdef BENCHMARK_HAS_CXX11
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
for (auto _ : state) {
}
template <class... Args> void BM_with_args(benchmark::State &state, Args &&...)
{
for (auto _ : state)
{
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"),
std::pair<int, double>(42, 3.8));
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair<int, double>(42, 3.8));
void BM_non_template_args(benchmark::State& state, int, double) {
while(state.KeepRunning()) {}
void BM_non_template_args(benchmark::State &state, int, double)
{
while (state.KeepRunning())
{
}
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
#endif // BENCHMARK_HAS_CXX11
#endif // BENCHMARK_HAS_CXX11
static void BM_DenseThreadRanges(benchmark::State& st) {
switch (st.range(0)) {
static void BM_DenseThreadRanges(benchmark::State &st)
{
switch (st.range(0))
{
case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
case 2:
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
st.threads == 14);
break;
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || st.threads == 14);
break;
default:
assert(false && "Invalid test case number");
}
while (st.KeepRunning()) {
}
assert(false && "Invalid test case number");
}
while (st.KeepRunning())
{
}
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);

View File

@ -4,61 +4,65 @@
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
extern "C" {
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
extern "C"
{
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
}
// CHECK-LABEL: test_basic:
extern "C" void test_basic() {
int x;
benchmark::DoNotOptimize(&x);
x = 101;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: movl $101, [[DEST]]
// CHECK: ret
extern "C" void test_basic()
{
int x;
benchmark::DoNotOptimize(&x);
x = 101;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: movl $101, [[DEST]]
// CHECK: ret
}
// CHECK-LABEL: test_redundant_store:
extern "C" void test_redundant_store() {
ExternInt = 3;
benchmark::ClobberMemory();
ExternInt = 51;
// CHECK-DAG: ExternInt
// CHECK-DAG: movl $3
// CHECK: movl $51
extern "C" void test_redundant_store()
{
ExternInt = 3;
benchmark::ClobberMemory();
ExternInt = 51;
// CHECK-DAG: ExternInt
// CHECK-DAG: movl $3
// CHECK: movl $51
}
// CHECK-LABEL: test_redundant_read:
extern "C" void test_redundant_read() {
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK-NOT: ExternInt2
// CHECK: ret
extern "C" void test_redundant_read()
{
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK-NOT: ExternInt2
// CHECK: ret
}
// CHECK-LABEL: test_redundant_read2:
extern "C" void test_redundant_read2() {
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ExternInt2(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ret
extern "C" void test_redundant_read2()
{
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ExternInt2(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ret
}

View File

@ -4,198 +4,215 @@
#include "../src/internal_macros.h"
#include "gtest/gtest.h"
namespace benchmark {
namespace {
namespace benchmark
{
namespace
{
#if defined(BENCHMARK_OS_WINDOWS)
int setenv(const char* name, const char* value, int overwrite) {
if (!overwrite) {
// NOTE: getenv_s is far superior but not available under mingw.
char* env_value = getenv(name);
if (env_value == nullptr) {
return -1;
int setenv(const char *name, const char *value, int overwrite)
{
if (!overwrite)
{
// NOTE: getenv_s is far superior but not available under mingw.
char *env_value = getenv(name);
if (env_value == nullptr)
{
return -1;
}
}
}
return _putenv_s(name, value);
return _putenv_s(name, value);
}
int unsetenv(const char* name) {
return _putenv_s(name, "");
int unsetenv(const char *name)
{
return _putenv_s(name, "");
}
#endif // BENCHMARK_OS_WINDOWS
#endif // BENCHMARK_OS_WINDOWS
TEST(BoolFromEnv, Default) {
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
TEST(BoolFromEnv, Default)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
}
TEST(BoolFromEnv, False) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
TEST(BoolFromEnv, False)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "N", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "N", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "n", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "n", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "NO", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "NO", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "No", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "No", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "no", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "no", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "F", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "F", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "f", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "f", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "FALSE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "FALSE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "False", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "False", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "false", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "false", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "OFF", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "OFF", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(BoolFromEnv, True) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
TEST(BoolFromEnv, True)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "YES", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "YES", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "T", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "T", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "t", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "t", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "TRUE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "TRUE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "True", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "True", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "true", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "true", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "ON", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "ON", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "On", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "On", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "on", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "on", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
#ifndef BENCHMARK_OS_WINDOWS
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
unsetenv("BENCHMARK_IN_ENV");
#endif
}
TEST(Int32FromEnv, NotInEnv) {
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
TEST(Int32FromEnv, NotInEnv)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
}
TEST(Int32FromEnv, InvalidInteger) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
unsetenv("BENCHMARK_IN_ENV");
TEST(Int32FromEnv, InvalidInteger)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(Int32FromEnv, ValidInteger) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
unsetenv("BENCHMARK_IN_ENV");
TEST(Int32FromEnv, ValidInteger)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(DoubleFromEnv, NotInEnv) {
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
TEST(DoubleFromEnv, NotInEnv)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
}
TEST(DoubleFromEnv, InvalidReal) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
unsetenv("BENCHMARK_IN_ENV");
TEST(DoubleFromEnv, InvalidReal)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(DoubleFromEnv, ValidReal) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
unsetenv("BENCHMARK_IN_ENV");
TEST(DoubleFromEnv, ValidReal)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
unsetenv("BENCHMARK_IN_ENV");
}
TEST(StringFromEnv, Default) {
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
TEST(StringFromEnv, Default)
{
ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
}
TEST(StringFromEnv, Valid) {
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
unsetenv("BENCHMARK_IN_ENV");
TEST(StringFromEnv, Valid)
{
ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
unsetenv("BENCHMARK_IN_ENV");
}
} // namespace
} // namespace benchmark
} // namespace
} // namespace benchmark
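The tests above exercise the library's helpers that read flag defaults from environment variables, using the convention of looking up the uppercased flag name with a BENCHMARK_ prefix (flag "in_env" becomes variable "BENCHMARK_IN_ENV"). A minimal standalone sketch of that mapping follows; it is an illustrative re-implementation, not the library's internal code, the helper name EnvVarNameFor is made up here, and setenv is POSIX-only, as in the tests.

#include <cctype>
#include <cstdlib>
#include <iostream>
#include <string>

// Hypothetical helper mirroring the naming convention shown in the tests.
static std::string EnvVarNameFor(const std::string& flag) {
  std::string name = "BENCHMARK_";
  for (char c : flag)
    name += static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
  return name;
}

int main() {
  setenv("BENCHMARK_IN_ENV", "1", 1);
  const char* value = std::getenv(EnvVarNameFor("in_env").c_str());
  std::cout << (value ? value : "(unset)") << "\n";  // prints "1"
  return 0;
}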

View File

@ -1,74 +1,73 @@
#undef NDEBUG
#include "benchmark/benchmark.h"
#include "output_test.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace {
namespace
{
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
#define ADD_COMPLEXITY_CASES(...) int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
int AddComplexityTest(std::string test_name, std::string big_o_test_name,
std::string rms_test_name, std::string big_o) {
SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}});
AddCases(
TC_ConsoleOut,
{{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"BigO\",$", MR_Next},
{"\"cpu_coefficient\": %float,$", MR_Next},
{"\"real_coefficient\": %float,$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"RMS\",$", MR_Next},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
{"^\"%bigo_name\"", MR_Not},
{"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
return 0;
int AddComplexityTest(std::string test_name, std::string big_o_test_name, std::string rms_test_name, std::string big_o)
{
SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
{"%rms", "[ ]*[0-9]+ %"}});
AddCases(TC_ConsoleOut, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
{"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"BigO\",$", MR_Next},
{"\"cpu_coefficient\": %float,$", MR_Next},
{"\"real_coefficient\": %float,$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": %int,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"RMS\",$", MR_Next},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
{"^\"%bigo_name\"", MR_Not},
{"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
return 0;
}
} // end namespace
} // end namespace
// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
void BM_Complexity_O1(benchmark::State &state)
{
for (auto _ : state)
{
for (int i = 0; i < 1024; ++i)
{
benchmark::DoNotOptimize(&i);
}
}
}
state.SetComplexityN(state.range(0));
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
->Range(1, 1 << 18)
->Complexity([](benchmark::IterationCount) { return 1.0; });
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](benchmark::IterationCount) { return 1.0; });
const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
@ -81,53 +80,46 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
enum_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
auto_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
lambda_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
std::vector<int> ConstructRandomVector(int64_t size) {
std::vector<int> v;
v.reserve(static_cast<int>(size));
for (int i = 0; i < size; ++i) {
v.push_back(static_cast<int>(std::rand() % size));
}
return v;
std::vector<int> ConstructRandomVector(int64_t size)
{
std::vector<int> v;
v.reserve(static_cast<int>(size));
for (int i = 0; i < size; ++i)
{
v.push_back(static_cast<int>(std::rand() % size));
}
return v;
}
void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
// Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
void BM_Complexity_O_N(benchmark::State &state)
{
auto v = ConstructRandomVector(state.range(0));
// Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state)
{
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) -> double {
return static_cast<double>(n);
});
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
->Complexity([](benchmark::IterationCount n) -> double { return static_cast<double>(n); });
BENCHMARK(BM_Complexity_O_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();
const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
@ -136,39 +128,31 @@ const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n);
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
lambda_big_o_n);
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
for (auto _ : state) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
static void BM_Complexity_O_N_log_N(benchmark::State &state)
{
auto v = ConstructRandomVector(state.range(0));
for (auto _ : state)
{
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));
}
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](benchmark::IterationCount n) {
return kLog2E * n * log(static_cast<double>(n));
});
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();
->Complexity([](benchmark::IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); });
BENCHMARK(BM_Complexity_O_N_log_N)->RangeMultiplier(2)->Range(1 << 10, 1 << 16)->Complexity();
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
@ -177,37 +161,36 @@ const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //
void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(n);
void BM_ComplexityCaptureArgs(benchmark::State &state, int n)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(n);
}
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
->Complexity(benchmark::oN)
->Ranges({{1, 2}, {3, 4}});
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)->Complexity(benchmark::oN)->Ranges({{1, 2}, {3, 4}});
const std::string complexity_capture_name =
"BM_ComplexityCaptureArgs/capture_test";
const std::string complexity_capture_name = "BM_ComplexityCaptureArgs/capture_test";
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
complexity_capture_name + "_RMS", "N");
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", complexity_capture_name + "_RMS", "N");
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}
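The file above checks the reporting of asymptotic-complexity aggregates. For orientation, this is a minimal sketch of the three registration styles it exercises: a fixed complexity class, an auto-fitted class, and a user-supplied curve. The benchmark name BM_FillVector and its body are invented for illustration; only the Complexity()/SetComplexityN() calls come from the API shown above.

#include "benchmark/benchmark.h"
#include <vector>

static void BM_FillVector(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v(static_cast<size_t>(state.range(0)), 1);
    benchmark::DoNotOptimize(v.data());
  }
  state.SetComplexityN(state.range(0));  // N used for the BigO/RMS fit
}
// Fixed class, auto-fitted class, and a lambda describing the expected curve.
BENCHMARK(BM_FillVector)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oN);
BENCHMARK(BM_FillVector)->Range(1 << 10, 1 << 16)->Complexity();
BENCHMARK(BM_FillVector)->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) { return static_cast<double>(n); });
BENCHMARK_MAIN();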

View File

@ -12,49 +12,55 @@
#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined.
#endif
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
volatile benchmark::IterationCount x = state.iterations();
((void)x);
}
void BM_empty(benchmark::State &state)
{
while (state.KeepRunning())
{
volatile benchmark::IterationCount x = state.iterations();
((void)x);
}
}
BENCHMARK(BM_empty);
// The new C++11 interface for args/ranges requires initializer list support.
// Therefore we provide the old interface to support C++03.
void BM_old_arg_range_interface(benchmark::State& state) {
assert((state.range(0) == 1 && state.range(1) == 2) ||
(state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning()) {
}
void BM_old_arg_range_interface(benchmark::State &state)
{
assert((state.range(0) == 1 && state.range(1) == 2) || (state.range(0) == 5 && state.range(1) == 6));
while (state.KeepRunning())
{
}
}
BENCHMARK(BM_old_arg_range_interface)->ArgPair(1, 2)->RangePair(5, 5, 6, 6);
template <class T, class U>
void BM_template2(benchmark::State& state) {
BM_empty(state);
template <class T, class U> void BM_template2(benchmark::State &state)
{
BM_empty(state);
}
BENCHMARK_TEMPLATE2(BM_template2, int, long);
template <class T>
void BM_template1(benchmark::State& state) {
BM_empty(state);
template <class T> void BM_template1(benchmark::State &state)
{
BM_empty(state);
}
BENCHMARK_TEMPLATE(BM_template1, long);
BENCHMARK_TEMPLATE1(BM_template1, int);
template <class T>
struct BM_Fixture : public ::benchmark::Fixture {
template <class T> struct BM_Fixture : public ::benchmark::Fixture
{
};
BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
BM_empty(state);
BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State &state)
{
BM_empty(state);
}
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) {
BM_empty(state);
BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State &state)
{
BM_empty(state);
}
void BM_counters(benchmark::State& state) {
void BM_counters(benchmark::State &state)
{
BM_empty(state);
state.counters["Foo"] = 2;
}

View File

@ -17,64 +17,80 @@
#define TEST_HAS_NO_EXCEPTIONS
#endif
void TestHandler() {
void TestHandler()
{
#ifndef TEST_HAS_NO_EXCEPTIONS
throw std::logic_error("");
throw std::logic_error("");
#else
std::abort();
std::abort();
#endif
}
void try_invalid_pause_resume(benchmark::State& state) {
void try_invalid_pause_resume(benchmark::State &state)
{
#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
try {
state.PauseTiming();
std::abort();
} catch (std::logic_error const&) {
}
try {
state.ResumeTiming();
std::abort();
} catch (std::logic_error const&) {
}
try
{
state.PauseTiming();
std::abort();
}
catch (std::logic_error const &)
{
}
try
{
state.ResumeTiming();
std::abort();
}
catch (std::logic_error const &)
{
}
#else
(void)state; // avoid unused warning
(void)state; // avoid unused warning
#endif
}
void BM_diagnostic_test(benchmark::State& state) {
static bool called_once = false;
void BM_diagnostic_test(benchmark::State &state)
{
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
if (called_once == false)
try_invalid_pause_resume(state);
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
if (called_once == false)
try_invalid_pause_resume(state);
called_once = true;
called_once = true;
}
BENCHMARK(BM_diagnostic_test);
void BM_diagnostic_test_keep_running(benchmark::State &state)
{
static bool called_once = false;
void BM_diagnostic_test_keep_running(benchmark::State& state) {
static bool called_once = false;
if (called_once == false)
try_invalid_pause_resume(state);
if (called_once == false) try_invalid_pause_resume(state);
while (state.KeepRunning())
{
benchmark::DoNotOptimize(state.iterations());
}
while(state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false)
try_invalid_pause_resume(state);
if (called_once == false) try_invalid_pause_resume(state);
called_once = true;
called_once = true;
}
BENCHMARK(BM_diagnostic_test_keep_running);
int main(int argc, char* argv[]) {
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
int main(int argc, char *argv[])
{
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
}

View File

@ -10,34 +10,36 @@
// reporter in the presence of DisplayAggregatesOnly().
// We do not care about console output; the normal tests already check that.
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
void BM_SummaryRepeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();
int main(int argc, char* argv[]) {
const std::string output = GetFileReporterOutput(argc, argv);
int main(int argc, char *argv[])
{
const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
std::cout << "Precondition mismatch. Expected to only find 6 "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
{
std::cout << "Precondition mismatch. Expected to only find 6 "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}
return 0;
return 0;
}

View File

@ -4,160 +4,181 @@
#pragma clang diagnostic ignored "-Wreturn-type"
#endif
extern "C" {
extern "C"
{
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;
inline int Add42(int x) { return x + 42; }
inline int Add42(int x)
{
return x + 42;
}
struct NotTriviallyCopyable {
NotTriviallyCopyable();
explicit NotTriviallyCopyable(int x) : value(x) {}
NotTriviallyCopyable(NotTriviallyCopyable const&);
int value;
};
struct Large {
int value;
int data[2];
};
struct NotTriviallyCopyable
{
NotTriviallyCopyable();
explicit NotTriviallyCopyable(int x) : value(x)
{
}
NotTriviallyCopyable(NotTriviallyCopyable const &);
int value;
};
struct Large
{
int value;
int data[2];
};
}
// CHECK-LABEL: test_with_rvalue:
extern "C" void test_with_rvalue() {
benchmark::DoNotOptimize(Add42(0));
// CHECK: movl $42, %eax
// CHECK: ret
extern "C" void test_with_rvalue()
{
benchmark::DoNotOptimize(Add42(0));
// CHECK: movl $42, %eax
// CHECK: ret
}
// CHECK-LABEL: test_with_large_rvalue:
extern "C" void test_with_large_rvalue() {
benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
extern "C" void test_with_large_rvalue()
{
benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}
// CHECK-LABEL: test_with_non_trivial_rvalue:
extern "C" void test_with_non_trivial_rvalue() {
benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
// CHECK: mov{{l|q}} ExternInt(%rip)
// CHECK: ret
extern "C" void test_with_non_trivial_rvalue()
{
benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
// CHECK: mov{{l|q}} ExternInt(%rip)
// CHECK: ret
}
// CHECK-LABEL: test_with_lvalue:
extern "C" void test_with_lvalue() {
int x = 101;
benchmark::DoNotOptimize(x);
// CHECK-GNU: movl $101, %eax
// CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
extern "C" void test_with_lvalue()
{
int x = 101;
benchmark::DoNotOptimize(x);
// CHECK-GNU: movl $101, %eax
// CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
}
// CHECK-LABEL: test_with_large_lvalue:
extern "C" void test_with_large_lvalue() {
Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
extern "C" void test_with_large_lvalue()
{
Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}
// CHECK-LABEL: test_with_non_trivial_lvalue:
extern "C" void test_with_non_trivial_lvalue() {
NotTriviallyCopyable NTC(ExternInt);
benchmark::DoNotOptimize(NTC);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
extern "C" void test_with_non_trivial_lvalue()
{
NotTriviallyCopyable NTC(ExternInt);
benchmark::DoNotOptimize(NTC);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
}
// CHECK-LABEL: test_with_const_lvalue:
extern "C" void test_with_const_lvalue() {
const int x = 123;
benchmark::DoNotOptimize(x);
// CHECK: movl $123, %eax
// CHECK: ret
extern "C" void test_with_const_lvalue()
{
const int x = 123;
benchmark::DoNotOptimize(x);
// CHECK: movl $123, %eax
// CHECK: ret
}
// CHECK-LABEL: test_with_large_const_lvalue:
extern "C" void test_with_large_const_lvalue() {
const Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
extern "C" void test_with_large_const_lvalue()
{
const Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}
// CHECK-LABEL: test_with_non_trivial_const_lvalue:
extern "C" void test_with_non_trivial_const_lvalue() {
const NotTriviallyCopyable Obj(ExternInt);
benchmark::DoNotOptimize(Obj);
// CHECK: mov{{q|l}} ExternInt(%rip)
// CHECK: ret
extern "C" void test_with_non_trivial_const_lvalue()
{
const NotTriviallyCopyable Obj(ExternInt);
benchmark::DoNotOptimize(Obj);
// CHECK: mov{{q|l}} ExternInt(%rip)
// CHECK: ret
}
// CHECK-LABEL: test_div_by_two:
extern "C" int test_div_by_two(int input) {
int divisor = 2;
benchmark::DoNotOptimize(divisor);
return input / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
extern "C" int test_div_by_two(int input)
{
int divisor = 2;
benchmark::DoNotOptimize(divisor);
return input / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}
// CHECK-LABEL: test_inc_integer:
extern "C" int test_inc_integer() {
int x = 0;
for (int i=0; i < 5; ++i)
benchmark::DoNotOptimize(++x);
// CHECK: movl $1, [[DEST:.*]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK-CLANG: movl [[DEST]], %eax
// CHECK: ret
return x;
extern "C" int test_inc_integer()
{
int x = 0;
for (int i = 0; i < 5; ++i)
benchmark::DoNotOptimize(++x);
// CHECK: movl $1, [[DEST:.*]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK-CLANG: movl [[DEST]], %eax
// CHECK: ret
return x;
}
// CHECK-LABEL: test_pointer_rvalue
extern "C" void test_pointer_rvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
benchmark::DoNotOptimize(&x);
extern "C" void test_pointer_rvalue()
{
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
benchmark::DoNotOptimize(&x);
}
// CHECK-LABEL: test_pointer_const_lvalue:
extern "C" void test_pointer_const_lvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
int * const xp = &x;
benchmark::DoNotOptimize(xp);
extern "C" void test_pointer_const_lvalue()
{
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
int *const xp = &x;
benchmark::DoNotOptimize(xp);
}
// CHECK-LABEL: test_pointer_lvalue:
extern "C" void test_pointer_lvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
// CHECK: ret
int x = 42;
int *xp = &x;
benchmark::DoNotOptimize(xp);
extern "C" void test_pointer_lvalue()
{
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
// CHECK: ret
int x = 42;
int *xp = &x;
benchmark::DoNotOptimize(xp);
}
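The assembly checks above verify that DoNotOptimize() keeps values observable to the optimizer without letting them be folded away. As a reminder of the ordinary use of the same call, here is a short sketch; the benchmark name BM_Add42 is hypothetical.

#include "benchmark/benchmark.h"

// DoNotOptimize() forces the compiler to treat the value as used, so the
// loop body is not removed entirely.
static void BM_Add42(benchmark::State& state) {
  int x = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(x += 42);
  }
}
BENCHMARK(BM_Add42);
BENCHMARK_MAIN();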

View File

@ -2,51 +2,61 @@
#include <cstdint>
namespace {
namespace
{
#if defined(__GNUC__)
std::uint64_t double_up(const std::uint64_t x) __attribute__((const));
#endif
std::uint64_t double_up(const std::uint64_t x) { return x * 2; }
std::uint64_t double_up(const std::uint64_t x)
{
return x * 2;
}
} // namespace
// Using DoNotOptimize on types like BitRef seems to cause a lot of problems
// with the inline assembly on both GCC and Clang.
struct BitRef {
int index;
unsigned char &byte;
struct BitRef
{
int index;
unsigned char &byte;
public:
static BitRef Make() {
static unsigned char arr[2] = {};
BitRef b(1, arr[0]);
return b;
}
private:
BitRef(int i, unsigned char& b) : index(i), byte(b) {}
public:
static BitRef Make()
{
static unsigned char arr[2] = {};
BitRef b(1, arr[0]);
return b;
}
private:
BitRef(int i, unsigned char &b) : index(i), byte(b)
{
}
};
int main(int, char*[]) {
// this test verifies compilation of DoNotOptimize() for some types
int main(int, char *[])
{
// this test verifies compilation of DoNotOptimize() for some types
char buffer8[8] = "";
benchmark::DoNotOptimize(buffer8);
char buffer8[8] = "";
benchmark::DoNotOptimize(buffer8);
char buffer20[20] = "";
benchmark::DoNotOptimize(buffer20);
char buffer20[20] = "";
benchmark::DoNotOptimize(buffer20);
char buffer1024[1024] = "";
benchmark::DoNotOptimize(buffer1024);
benchmark::DoNotOptimize(&buffer1024[0]);
char buffer1024[1024] = "";
benchmark::DoNotOptimize(buffer1024);
benchmark::DoNotOptimize(&buffer1024[0]);
int x = 123;
benchmark::DoNotOptimize(x);
benchmark::DoNotOptimize(&x);
benchmark::DoNotOptimize(x += 42);
int x = 123;
benchmark::DoNotOptimize(x);
benchmark::DoNotOptimize(&x);
benchmark::DoNotOptimize(x += 42);
benchmark::DoNotOptimize(double_up(x));
benchmark::DoNotOptimize(double_up(x));
// These tests are to e
benchmark::DoNotOptimize(BitRef::Make());
BitRef lval = BitRef::Make();
benchmark::DoNotOptimize(lval);
// These tests are to e
benchmark::DoNotOptimize(BitRef::Make());
BitRef lval = BitRef::Make();
benchmark::DoNotOptimize(lval);
}

View File

@ -10,95 +10,116 @@
#include <sstream>
#include <string>
namespace {
namespace
{
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
class TestReporter : public benchmark::ConsoleReporter
{
public:
virtual bool ReportContext(const Context &context)
{
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
++count_;
ConsoleReporter::ReportRuns(report);
};
virtual void ReportRuns(const std::vector<Run> &report)
{
++count_;
ConsoleReporter::ReportRuns(report);
};
TestReporter() : count_(0) {}
TestReporter() : count_(0)
{
}
virtual ~TestReporter() {}
virtual ~TestReporter()
{
}
size_t GetCount() const { return count_; }
size_t GetCount() const
{
return count_;
}
private:
mutable size_t count_;
private:
mutable size_t count_;
};
} // end namespace
} // end namespace
static void NoPrefix(benchmark::State& state) {
for (auto _ : state) {
}
static void NoPrefix(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
for (auto _ : state) {
}
static void BM_Foo(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
for (auto _ : state) {
}
static void BM_Bar(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
for (auto _ : state) {
}
static void BM_FooBar(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
for (auto _ : state) {
}
static void BM_FooBa(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_FooBa);
int main(int argc, char **argv) {
bool list_only = false;
for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
std::string::npos;
int main(int argc, char **argv)
{
bool list_only = false;
for (int i = 0; i < argc; ++i)
list_only |= std::string(argv[i]).find("--benchmark_list_tests") != std::string::npos;
benchmark::Initialize(&argc, argv);
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
const size_t returned_count =
benchmark::RunSpecifiedBenchmarks(&test_reporter);
TestReporter test_reporter;
const size_t returned_count = benchmark::RunSpecifiedBenchmarks(&test_reporter);
if (argc == 2) {
// Make sure we ran all of the tests
std::stringstream ss(argv[1]);
size_t expected_return;
ss >> expected_return;
if (argc == 2)
{
// Make sure we ran all of the tests
std::stringstream ss(argv[1]);
size_t expected_return;
ss >> expected_return;
if (returned_count != expected_return) {
std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = "
<< returned_count << std::endl;
return -1;
if (returned_count != expected_return)
{
std::cerr << "ERROR: Expected " << expected_return
<< " tests to match the filter but returned_count = " << returned_count << std::endl;
return -1;
}
const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports)
{
std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count << std::endl;
return -1;
}
}
const size_t expected_reports = list_only ? 0 : expected_return;
const size_t reports_count = test_reporter.GetCount();
if (reports_count != expected_reports) {
std::cerr << "ERROR: Expected " << expected_reports
<< " tests to be run but reported_count = " << reports_count
<< std::endl;
return -1;
}
}
return 0;
return 0;
}
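The filter test above installs a custom reporter to count how many benchmarks actually ran. A minimal sketch of that pattern, assuming the usual benchmark setup; the class name CountingReporter and the BM_Noop benchmark are invented for illustration.

#include "benchmark/benchmark.h"
#include <iostream>

// Count result batches while still printing the normal console output.
class CountingReporter : public benchmark::ConsoleReporter {
 public:
  void ReportRuns(const std::vector<Run>& report) override {
    ++count_;
    ConsoleReporter::ReportRuns(report);
  }
  int count() const { return count_; }

 private:
  int count_ = 0;
};

static void BM_Noop(benchmark::State& state) { for (auto _ : state) {} }
BENCHMARK(BM_Noop);

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv);
  CountingReporter reporter;
  benchmark::RunSpecifiedBenchmarks(&reporter);
  std::cerr << "reported batches: " << reporter.count() << "\n";
}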

View File

@ -4,44 +4,57 @@
#include <cassert>
#include <memory>
class MyFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() == nullptr);
data.reset(new int(42));
class MyFixture : public ::benchmark::Fixture
{
public:
void SetUp(const ::benchmark::State &state)
{
if (state.thread_index == 0)
{
assert(data.get() == nullptr);
data.reset(new int(42));
}
}
}
void TearDown(const ::benchmark::State& state) {
if (state.thread_index == 0) {
assert(data.get() != nullptr);
data.reset();
void TearDown(const ::benchmark::State &state)
{
if (state.thread_index == 0)
{
assert(data.get() != nullptr);
data.reset();
}
}
}
~MyFixture() { assert(data == nullptr); }
~MyFixture()
{
assert(data == nullptr);
}
std::unique_ptr<int> data;
std::unique_ptr<int> data;
};
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
for (auto _ : st) {
}
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st)
{
assert(data.get() != nullptr);
assert(*data == 42);
for (auto _ : st)
{
}
}
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
assert(data.get() != nullptr);
assert(*data == 42);
}
for (auto _ : st) {
assert(data.get() != nullptr);
assert(*data == 42);
}
st.SetItemsProcessed(st.range(0));
BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State &st)
{
if (st.thread_index == 0)
{
assert(data.get() != nullptr);
assert(*data == 42);
}
for (auto _ : st)
{
assert(data.get() != nullptr);
assert(*data == 42);
}
st.SetItemsProcessed(st.range(0));
}
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
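For context, the fixture pattern tested above can be used as in the following sketch: SetUp/TearDown bracket each benchmark run, and BENCHMARK_F attaches a benchmark body to the fixture. VectorFixture and its data are hypothetical names, not part of the library.

#include "benchmark/benchmark.h"
#include <vector>

class VectorFixture : public ::benchmark::Fixture {
 public:
  void SetUp(const ::benchmark::State&) { data.assign(1024, 7); }
  void TearDown(const ::benchmark::State&) { data.clear(); }
  std::vector<int> data;
};

BENCHMARK_F(VectorFixture, Sum)(benchmark::State& st) {
  for (auto _ : st) {
    long sum = 0;
    for (int v : data) sum += v;   // data prepared by SetUp()
    benchmark::DoNotOptimize(sum);
  }
}
BENCHMARK_MAIN();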

View File

@ -1,29 +1,28 @@
#undef NDEBUG
#include <chrono>
#include <thread>
#include "../src/timers.h"
#include "benchmark/benchmark.h"
#include "output_test.h"
#include <chrono>
#include <thread>
static const std::chrono::duration<double, std::milli> time_frame(50);
static const double time_frame_in_sec(
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(
time_frame)
.count());
std::chrono::duration_cast<std::chrono::duration<double, std::ratio<1, 1>>>(time_frame).count());
void MyBusySpinwait() {
const auto start = benchmark::ChronoClockNow();
void MyBusySpinwait()
{
const auto start = benchmark::ChronoClockNow();
while (true) {
const auto now = benchmark::ChronoClockNow();
const auto elapsed = now - start;
while (true)
{
const auto now = benchmark::ChronoClockNow();
const auto elapsed = now - start;
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >=
time_frame)
return;
}
if (std::chrono::duration<double, std::chrono::seconds::period>(elapsed) >= time_frame)
return;
}
}
// ========================================================================= //
@ -33,152 +32,92 @@ void MyBusySpinwait() {
// ========================================================================= //
// BM_MainThread
void BM_MainThread(benchmark::State& state) {
for (auto _ : state) {
MyBusySpinwait();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
void BM_MainThread(benchmark::State &state)
{
for (auto _ : state)
{
MyBusySpinwait();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
// ========================================================================= //
// BM_WorkerThread
void BM_WorkerThread(benchmark::State& state) {
for (auto _ : state) {
std::thread Worker(&MyBusySpinwait);
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
void BM_WorkerThread(benchmark::State &state)
{
for (auto _ : state)
{
std::thread Worker(&MyBusySpinwait);
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_WorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
// ========================================================================= //
// BM_MainThreadAndWorkerThread
void BM_MainThreadAndWorkerThread(benchmark::State& state) {
for (auto _ : state) {
std::thread Worker(&MyBusySpinwait);
MyBusySpinwait();
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] =
benchmark::Counter{1, benchmark::Counter::kIsRate};
void BM_MainThreadAndWorkerThread(benchmark::State &state)
{
for (auto _ : state)
{
std::thread Worker(&MyBusySpinwait);
MyBusySpinwait();
Worker.join();
state.SetIterationTime(time_frame_in_sec);
}
state.counters["invtime"] = benchmark::Counter{1, benchmark::Counter::kIsRate};
}
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1);
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(1)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime()->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2);
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)
->Iterations(1)
->Threads(2)
->MeasureProcessCPUTime()
->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->UseManualTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseRealTime();
BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime()->UseManualTime();
// ========================================================================= //
// ---------------------------- TEST CASES END ----------------------------- //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}
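The threading tests above rely on manual timing: the benchmark measures its own interval and hands it to the library. A compact sketch of that mechanism, with an invented benchmark name BM_SleepManual:

#include "benchmark/benchmark.h"
#include <chrono>
#include <thread>

// With UseManualTime(), the reported time is whatever SetIterationTime()
// records, not the wall clock around the loop body.
static void BM_SleepManual(benchmark::State& state) {
  for (auto _ : state) {
    const auto start = std::chrono::high_resolution_clock::now();
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    const auto end = std::chrono::high_resolution_clock::now();
    state.SetIterationTime(std::chrono::duration<double>(end - start).count());
  }
}
BENCHMARK(BM_SleepManual)->UseManualTime();
BENCHMARK_MAIN();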

View File

@ -1,8 +1,10 @@
#include "benchmark/benchmark.h"
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);

View File

@ -3,54 +3,68 @@
#include <cstdlib>
#include <map>
namespace {
namespace
{
std::map<int, int> ConstructRandomMap(int size) {
std::map<int, int> m;
for (int i = 0; i < size; ++i) {
m.insert(std::make_pair(std::rand() % size, std::rand() % size));
}
return m;
std::map<int, int> ConstructRandomMap(int size)
{
std::map<int, int> m;
for (int i = 0; i < size; ++i)
{
m.insert(std::make_pair(std::rand() % size, std::rand() % size));
}
return m;
}
} // namespace
} // namespace
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = static_cast<int>(state.range(0));
std::map<int, int> m;
for (auto _ : state) {
state.PauseTiming();
m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(std::rand() % size));
static void BM_MapLookup(benchmark::State &state)
{
const int size = static_cast<int>(state.range(0));
std::map<int, int> m;
for (auto _ : state)
{
state.PauseTiming();
m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i)
{
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
}
state.SetItemsProcessed(state.iterations() * size);
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
// Using fixtures.
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) {
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
class MapFixture : public ::benchmark::Fixture
{
public:
void SetUp(const ::benchmark::State &st)
{
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
void TearDown(const ::benchmark::State&) { m.clear(); }
void TearDown(const ::benchmark::State &)
{
m.clear();
}
std::map<int, int> m;
std::map<int, int> m;
};
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = static_cast<int>(state.range(0));
for (auto _ : state) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(std::rand() % size));
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State &state)
{
const int size = static_cast<int>(state.range(0));
for (auto _ : state)
{
for (int i = 0; i < size; ++i)
{
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
}
state.SetItemsProcessed(state.iterations() * size);
state.SetItemsProcessed(state.iterations() * size);
}
BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
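BM_MapLookup above excludes the map construction from the measurement with PauseTiming()/ResumeTiming(). The same idea in isolation, assuming a trivial sort workload (BM_SortCopy is an invented name):

#include "benchmark/benchmark.h"
#include <algorithm>
#include <cstdlib>
#include <vector>

static void BM_SortCopy(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();            // setup is not measured
    std::vector<int> v(1024);
    for (int& x : v) x = std::rand();
    state.ResumeTiming();           // only the sort is measured
    std::sort(v.begin(), v.end());
    benchmark::DoNotOptimize(v.data());
  }
}
BENCHMARK(BM_SortCopy);
BENCHMARK_MAIN();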

View File

@ -4,18 +4,24 @@
#include "benchmark/benchmark.h"
#include "output_test.h"
class TestMemoryManager : public benchmark::MemoryManager {
void Start() {}
void Stop(Result* result) {
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
class TestMemoryManager : public benchmark::MemoryManager
{
void Start()
{
}
void Stop(Result *result)
{
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
};
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
@ -35,10 +41,11 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});
int main(int argc, char* argv[]) {
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
int main(int argc, char *argv[])
{
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());
benchmark::RegisterMemoryManager(mm.get());
RunOutputTests(argc, argv);
benchmark::RegisterMemoryManager(nullptr);
benchmark::RegisterMemoryManager(mm.get());
RunOutputTests(argc, argv);
benchmark::RegisterMemoryManager(nullptr);
}
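The memory-manager test registers a hook whose Start()/Stop() calls bracket each benchmark run and whose filled-in Result ends up in the JSON output. A hedged sketch of the same hook outside the output-test harness; FixedMemoryManager and its constant values are placeholders, where a real manager would query an allocator.

#include "benchmark/benchmark.h"
#include <memory>

class FixedMemoryManager : public benchmark::MemoryManager {
  void Start() {}
  void Stop(Result* result) {
    result->num_allocs = 1;
    result->max_bytes_used = 4096;
  }
};

static void BM_Nothing(benchmark::State& state) { for (auto _ : state) {} }
BENCHMARK(BM_Nothing);

int main(int argc, char* argv[]) {
  std::unique_ptr<benchmark::MemoryManager> mm(new FixedMemoryManager());
  benchmark::RegisterMemoryManager(mm.get());
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  benchmark::RegisterMemoryManager(nullptr);
}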

View File

@ -5,72 +5,84 @@
#include <set>
#include <vector>
class MultipleRangesFixture : public ::benchmark::Fixture {
public:
MultipleRangesFixture()
: expectedValues({{1, 3, 5},
{1, 3, 8},
{1, 3, 15},
{2, 3, 5},
{2, 3, 8},
{2, 3, 15},
{1, 4, 5},
{1, 4, 8},
{1, 4, 15},
{2, 4, 5},
{2, 4, 8},
{2, 4, 15},
{1, 7, 5},
{1, 7, 8},
{1, 7, 15},
{2, 7, 5},
{2, 7, 8},
{2, 7, 15},
{7, 6, 3}}) {}
void SetUp(const ::benchmark::State& state) {
std::vector<int64_t> ranges = {state.range(0), state.range(1),
state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~MultipleRangesFixture() {
if (actualValues != expectedValues) {
std::cout << "EXPECTED\n";
for (auto v : expectedValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
std::cout << "ACTUAL\n";
for (auto v : actualValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
class MultipleRangesFixture : public ::benchmark::Fixture
{
public:
MultipleRangesFixture()
: expectedValues({{1, 3, 5},
{1, 3, 8},
{1, 3, 15},
{2, 3, 5},
{2, 3, 8},
{2, 3, 15},
{1, 4, 5},
{1, 4, 8},
{1, 4, 15},
{2, 4, 5},
{2, 4, 8},
{2, 4, 15},
{1, 7, 5},
{1, 7, 8},
{1, 7, 15},
{2, 7, 5},
{2, 7, 8},
{2, 7, 15},
{7, 6, 3}})
{
}
}
std::set<std::vector<int64_t>> expectedValues;
std::set<std::vector<int64_t>> actualValues;
void SetUp(const ::benchmark::State &state)
{
std::vector<int64_t> ranges = {state.range(0), state.range(1), state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~MultipleRangesFixture()
{
if (actualValues != expectedValues)
{
std::cout << "EXPECTED\n";
for (auto v : expectedValues)
{
std::cout << "{";
for (int64_t iv : v)
{
std::cout << iv << ", ";
}
std::cout << "}\n";
}
std::cout << "ACTUAL\n";
for (auto v : actualValues)
{
std::cout << "{";
for (int64_t iv : v)
{
std::cout << iv << ", ";
}
std::cout << "}\n";
}
}
}
std::set<std::vector<int64_t>> expectedValues;
std::set<std::vector<int64_t>> actualValues;
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
for (auto _ : state) {
int64_t product = state.range(0) * state.range(1) * state.range(2);
for (int64_t x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State &state)
{
for (auto _ : state)
{
int64_t product = state.range(0) * state.range(1) * state.range(2);
for (int64_t x = 0; x < product; x++)
{
benchmark::DoNotOptimize(x);
}
}
}
}
BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
@ -78,18 +90,22 @@ BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
->Ranges({{1, 2}, {3, 7}, {5, 15}})
->Args({7, 6, 3});
void BM_CheckDefaultArgument(benchmark::State& state) {
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
for (auto _ : state) {
}
void BM_CheckDefaultArgument(benchmark::State &state)
{
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
for (auto _ : state)
{
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) {
for (auto _ : st) {
}
static void BM_MultipleRanges(benchmark::State &st)
{
for (auto _ : st)
{
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});

View File

@ -7,17 +7,20 @@
#endif
#include <cassert>
void BM_basic(benchmark::State& state) {
for (auto _ : state) {
}
void BM_basic(benchmark::State &state)
{
for (auto _ : state)
{
}
}
void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range(0));
for (auto _ : state) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
void BM_basic_slow(benchmark::State &state)
{
std::chrono::milliseconds sleep_duration(state.range(0));
for (auto _ : state)
{
std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
}
BENCHMARK(BM_basic);
@ -37,8 +40,7 @@ BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
BENCHMARK(BM_basic)
->RangeMultiplier(std::numeric_limits<int>::max())
->Range(std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max());
->Range(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max());
// Negative ranges
BENCHMARK(BM_basic)->Range(-64, -1);
@ -46,29 +48,31 @@ BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8);
BENCHMARK(BM_basic)->DenseRange(-2, 2, 1);
BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}});
void CustomArgs(benchmark::internal::Benchmark* b) {
for (int i = 0; i < 10; ++i) {
b->Arg(i);
}
void CustomArgs(benchmark::internal::Benchmark *b)
{
for (int i = 0; i < 10; ++i)
{
b->Arg(i);
}
}
BENCHMARK(BM_basic)->Apply(CustomArgs);
void BM_explicit_iteration_count(benchmark::State& state) {
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
assert(!invoked_before);
invoked_before = true;
// Test that the requested iteration count is respected.
assert(state.max_iterations == 42);
size_t actual_iterations = 0;
for (auto _ : state)
++actual_iterations;
assert(state.iterations() == state.max_iterations);
assert(state.iterations() == 42);
void BM_explicit_iteration_count(benchmark::State &state)
{
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
assert(!invoked_before);
invoked_before = true;
// Test that the requested iteration count is respected.
assert(state.max_iterations == 42);
size_t actual_iterations = 0;
for (auto _ : state)
++actual_iterations;
assert(state.iterations() == state.max_iterations);
assert(state.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
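The options test above enumerates the argument-registration calls (Arg, Args, Range, Ranges, DenseRange, RangeMultiplier, Iterations, ...). As a small usage sketch, the arguments registered this way are retrieved inside the body via state.range(i); the benchmark name BM_Args is invented.

#include "benchmark/benchmark.h"
#include <cstdint>

static void BM_Args(benchmark::State& state) {
  const int64_t a = state.range(0);
  const int64_t b = state.range(1);
  for (auto _ : state) {
    benchmark::DoNotOptimize(a + b);
  }
}
// Explicit argument pairs, and a cartesian product of ranges.
BENCHMARK(BM_Args)->Args({8, 64})->Args({256, 1024});
BENCHMARK(BM_Args)->Ranges({{1, 1 << 10}, {1, 1 << 4}});
BENCHMARK(BM_Args)->RangeMultiplier(4)->Ranges({{1, 256}, {64, 64}});
BENCHMARK_MAIN();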

View File

@ -18,34 +18,36 @@
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) \
int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
#define SET_SUBSTITUTIONS(...) int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__)
enum MatchRules {
MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches
// the regex
enum MatchRules
{
MR_Default, // Skip non-matching lines until a match is found.
MR_Next, // Match must occur on the next line.
MR_Not // No line between the current position and the next match matches
// the regex
};
struct TestCase {
TestCase(std::string re, int rule = MR_Default);
struct TestCase
{
TestCase(std::string re, int rule = MR_Default);
std::string regex_str;
int match_rule;
std::string substituted_regex;
std::shared_ptr<benchmark::Regex> regex;
std::string regex_str;
int match_rule;
std::string substituted_regex;
std::shared_ptr<benchmark::Regex> regex;
};
enum TestCaseID {
TC_ConsoleOut,
TC_ConsoleErr,
TC_JSONOut,
TC_JSONErr,
TC_CSVOut,
TC_CSVErr,
enum TestCaseID
{
TC_ConsoleOut,
TC_ConsoleErr,
TC_JSONOut,
TC_JSONErr,
TC_CSVOut,
TC_CSVErr,
TC_NumID // PRIVATE
TC_NumID // PRIVATE
};
// Add a list of test cases to be run against the output specified by
@ -54,18 +56,17 @@ int AddCases(TestCaseID ID, std::initializer_list<TestCase> il);
// Add or set a list of substitutions to be performed on constructed regexes.
// See 'output_test_helper.cc' for a list of default substitutions.
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il);
int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il);
// Run all output tests.
void RunOutputTests(int argc, char* argv[]);
void RunOutputTests(int argc, char *argv[]);
// Count the number of 'pat' substrings in the 'haystack' string.
int SubstrCnt(const std::string& haystack, const std::string& pat);
int SubstrCnt(const std::string &haystack, const std::string &pat);
// Run registered benchmarks with file reporter enabled, and return the content
// outputted by the file reporter.
std::string GetFileReporterOutput(int argc, char* argv[]);
std::string GetFileReporterOutput(int argc, char *argv[]);
// ========================================================================= //
// ------------------------- Results checking ------------------------------ //
@ -79,77 +80,87 @@ std::string GetFileReporterOutput(int argc, char* argv[]);
// all the benchmark names. Matching benchmarks
// will be the subject of a call to checker_function
// checker_function: should be of type ResultsCheckFn (see below)
#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
struct Results;
typedef std::function<void(Results const&)> ResultsCheckFn;
typedef std::function<void(Results const &)> ResultsCheckFn;
size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
size_t AddChecker(const char *bm_name_pattern, ResultsCheckFn fn);
// Class holding the results of a benchmark.
// It is passed in calls to checker functions.
struct Results {
// the benchmark name
std::string name;
// the benchmark fields
std::map<std::string, std::string> values;
struct Results
{
// the benchmark name
std::string name;
// the benchmark fields
std::map<std::string, std::string> values;
Results(const std::string& n) : name(n) {}
Results(const std::string &n) : name(n)
{
}
int NumThreads() const;
int NumThreads() const;
double NumIterations() const;
double NumIterations() const;
typedef enum { kCpuTime, kRealTime } BenchmarkTime;
typedef enum
{
kCpuTime,
kRealTime
} BenchmarkTime;
// get cpu_time or real_time in seconds
double GetTime(BenchmarkTime which) const;
// get cpu_time or real_time in seconds
double GetTime(BenchmarkTime which) const;
// get the real_time duration of the benchmark in seconds.
// it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy.
double DurationRealTime() const {
return NumIterations() * GetTime(kRealTime);
}
// get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const {
return NumIterations() * GetTime(kCpuTime);
}
// get the real_time duration of the benchmark in seconds.
// it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy.
double DurationRealTime() const
{
return NumIterations() * GetTime(kRealTime);
}
// get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const
{
return NumIterations() * GetTime(kCpuTime);
}
// get the string for a result by name, or nullptr if the name
// is not found
const std::string* Get(const char* entry_name) const {
auto it = values.find(entry_name);
if (it == values.end()) return nullptr;
return &it->second;
}
// get the string for a result by name, or nullptr if the name
// is not found
const std::string *Get(const char *entry_name) const
{
auto it = values.find(entry_name);
if (it == values.end())
return nullptr;
return &it->second;
}
// get a result by name, parsed as a specific type.
// NOTE: for counters, use GetCounterAs instead.
template <class T>
T GetAs(const char* entry_name) const;
// get a result by name, parsed as a specific type.
// NOTE: for counters, use GetCounterAs instead.
template <class T> T GetAs(const char *entry_name) const;
// counters are written as doubles, so they have to be read first
// as a double, and only then converted to the asked type.
template <class T>
T GetCounterAs(const char* entry_name) const {
double dval = GetAs<double>(entry_name);
T tval = static_cast<T>(dval);
return tval;
}
// counters are written as doubles, so they have to be read first
// as a double, and only then converted to the asked type.
template <class T> T GetCounterAs(const char *entry_name) const
{
double dval = GetAs<double>(entry_name);
T tval = static_cast<T>(dval);
return tval;
}
};
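// A hedged example of a checker that combines these accessors; the field name
// passed to GetCounterAs is invented and would have to exist (and be non-empty)
// in the CSV output for the lookup to succeed:
void CheckTimesAndCounters(Results const &r)
{
    double cpu_seconds = r.GetTime(Results::kCpuTime);               // cpu_time field converted to seconds
    double wall_seconds = r.DurationRealTime();                      // iterations * per-iteration real_time
    long long bytes = r.GetCounterAs<long long>("bytes_per_second"); // stored as double, narrowed here
    CHECK(cpu_seconds >= 0.0);
    CHECK(wall_seconds >= 0.0);
    CHECK(bytes >= 0);
}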
template <class T>
T Results::GetAs(const char* entry_name) const {
auto* sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty());
std::stringstream ss;
ss << *sv;
T out;
ss >> out;
CHECK(!ss.fail());
return out;
template <class T> T Results::GetAs(const char *entry_name) const
{
auto *sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty());
std::stringstream ss;
ss << *sv;
T out;
ss >> out;
CHECK(!ss.fail());
return out;
}
//----------------------------------
@ -204,10 +215,11 @@ T Results::GetAs(const char* entry_name) const {
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //
namespace {
namespace
{
const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
const char *const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
} // end namespace
} // end namespace
#endif // TEST_OUTPUT_TEST_H
#endif // TEST_OUTPUT_TEST_H

View File

@ -9,15 +9,17 @@
#include <streambuf>
#include "../src/benchmark_api_internal.h"
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h" // NOTE: re.h is for internal use only
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h" // NOTE: re.h is for internal use only
#include "output_test.h"
// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
// ========================================================================= //
namespace internal {
namespace {
namespace internal
{
namespace
{
using TestCaseList = std::vector<TestCase>;
@ -28,16 +30,18 @@ using TestCaseList = std::vector<TestCase>;
// Substitute("%HelloWorld") // Always expands to Hello.
using SubMap = std::vector<std::pair<std::string, std::string>>;
TestCaseList& GetTestCaseList(TestCaseID ID) {
// Uses function-local statics to ensure initialization occurs
// before first use.
static TestCaseList lists[TC_NumID];
return lists[ID];
TestCaseList &GetTestCaseList(TestCaseID ID)
{
// Uses function-local statics to ensure initialization occurs
// before first use.
static TestCaseList lists[TC_NumID];
return lists[ID];
}
SubMap& GetSubstitutions() {
// Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
SubMap &GetSubstitutions()
{
// Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
static std::string time_re = "([0-9]+[.])?[0-9]+";
static SubMap map = {
@ -65,271 +69,325 @@ SubMap& GetSubstitutions() {
"," + safe_dec_re + ",,,"},
{"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
{"%csv_label_report_end", ",,"}};
// clang-format on
return map;
// clang-format on
return map;
}
std::string PerformSubstitutions(std::string source) {
SubMap const& subs = GetSubstitutions();
using SizeT = std::string::size_type;
for (auto const& KV : subs) {
SizeT pos;
SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos) {
next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second);
std::string PerformSubstitutions(std::string source)
{
SubMap const &subs = GetSubstitutions();
using SizeT = std::string::size_type;
for (auto const &KV : subs)
{
SizeT pos;
SizeT next_start = 0;
while ((pos = source.find(KV.first, next_start)) != std::string::npos)
{
next_start = pos + KV.second.size();
source.replace(pos, KV.first.size(), KV.second);
}
}
}
return source;
return source;
}
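// A hedged worked example of the expansion (assuming "%int" expands to a
// digit-run pattern such as "[0-9]+", as the test regexes later in this diff
// rely on; the exact replacement lives in the part of the map not shown here):
//   PerformSubstitutions("%int[-/]%int[-/]%int %int:%int:%int$")
//     -> "[0-9]+[-/][0-9]+[-/][0-9]+ [0-9]+:[0-9]+:[0-9]+$"
// The expanded string is what TestCase stores in substituted_regex.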
void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
TestCaseList const& not_checks) {
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false) {
CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first) {
first_line = line;
on_first = false;
void CheckCase(std::stringstream &remaining_output, TestCase const &TC, TestCaseList const &not_checks)
{
std::string first_line;
bool on_first = true;
std::string line;
while (remaining_output.eof() == false)
{
CHECK(remaining_output.good());
std::getline(remaining_output, line);
if (on_first)
{
first_line = line;
on_first = false;
}
for (const auto &NC : not_checks)
{
CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
if (TC.regex->Match(line))
return;
CHECK(TC.match_rule != MR_Next) << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
for (const auto& NC : not_checks) {
CHECK(!NC.regex->Match(line))
<< "Unexpected match for line \"" << line << "\" for MR_Not regex \""
<< NC.regex_str << "\""
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
if (TC.regex->Match(line)) return;
CHECK(TC.match_rule != MR_Next)
<< "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
<< "\""
CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str << "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
CHECK(remaining_output.eof() == false)
<< "End of output reached before match for regex \"" << TC.regex_str
<< "\" was found"
<< "\n actual regex string \"" << TC.substituted_regex << "\""
<< "\n started matching near: " << first_line;
}
void CheckCases(TestCaseList const& checks, std::stringstream& output) {
std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i) {
const auto& TC = checks[i];
if (TC.match_rule == MR_Not) {
not_checks.push_back(TC);
continue;
void CheckCases(TestCaseList const &checks, std::stringstream &output)
{
std::vector<TestCase> not_checks;
for (size_t i = 0; i < checks.size(); ++i)
{
const auto &TC = checks[i];
if (TC.match_rule == MR_Not)
{
not_checks.push_back(TC);
continue;
}
CheckCase(output, TC, not_checks);
not_checks.clear();
}
CheckCase(output, TC, not_checks);
not_checks.clear();
}
}
class TestReporter : public benchmark::BenchmarkReporter {
public:
TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
: reporters_(reps) {}
virtual bool ReportContext(const Context& context) {
bool last_ret = false;
bool first = true;
for (auto rep : reporters_) {
bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret)
<< "Reports return different values for ReportContext";
first = false;
last_ret = new_ret;
class TestReporter : public benchmark::BenchmarkReporter
{
public:
TestReporter(std::vector<benchmark::BenchmarkReporter *> reps) : reporters_(reps)
{
}
(void)first;
return last_ret;
}
void ReportRuns(const std::vector<Run>& report) {
for (auto rep : reporters_) rep->ReportRuns(report);
}
void Finalize() {
for (auto rep : reporters_) rep->Finalize();
}
virtual bool ReportContext(const Context &context)
{
bool last_ret = false;
bool first = true;
for (auto rep : reporters_)
{
bool new_ret = rep->ReportContext(context);
CHECK(first || new_ret == last_ret) << "Reports return different values for ReportContext";
first = false;
last_ret = new_ret;
}
(void)first;
return last_ret;
}
private:
std::vector<benchmark::BenchmarkReporter*> reporters_;
void ReportRuns(const std::vector<Run> &report)
{
for (auto rep : reporters_)
rep->ReportRuns(report);
}
void Finalize()
{
for (auto rep : reporters_)
rep->Finalize();
}
private:
std::vector<benchmark::BenchmarkReporter *> reporters_;
};
} // namespace
} // namespace
} // end namespace internal
} // end namespace internal
// ========================================================================= //
// -------------------------- Results checking ----------------------------- //
// ========================================================================= //
namespace internal {
namespace internal
{
// Utility class to manage subscribers for checking benchmark results.
// It works by parsing the CSV output to read the results.
class ResultsChecker {
public:
struct PatternAndFn : public TestCase { // reusing TestCase for its regexes
PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
: TestCase(rx), fn(fn_) {}
ResultsCheckFn fn;
};
class ResultsChecker
{
public:
struct PatternAndFn : public TestCase
{ // reusing TestCase for its regexes
PatternAndFn(const std::string &rx, ResultsCheckFn fn_) : TestCase(rx), fn(fn_)
{
}
ResultsCheckFn fn;
};
std::vector<PatternAndFn> check_patterns;
std::vector<Results> results;
std::vector<std::string> field_names;
std::vector<PatternAndFn> check_patterns;
std::vector<Results> results;
std::vector<std::string> field_names;
void Add(const std::string& entry_pattern, ResultsCheckFn fn);
void Add(const std::string &entry_pattern, ResultsCheckFn fn);
void CheckResults(std::stringstream& output);
void CheckResults(std::stringstream &output);
private:
void SetHeader_(const std::string& csv_header);
void SetValues_(const std::string& entry_csv_line);
private:
void SetHeader_(const std::string &csv_header);
void SetValues_(const std::string &entry_csv_line);
std::vector<std::string> SplitCsv_(const std::string& line);
std::vector<std::string> SplitCsv_(const std::string &line);
};
// store the static ResultsChecker in a function to prevent initialization
// order problems
ResultsChecker& GetResultsChecker() {
static ResultsChecker rc;
return rc;
ResultsChecker &GetResultsChecker()
{
static ResultsChecker rc;
return rc;
}
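// The same construct-on-first-use idiom as GetTestCaseList above; a minimal
// stand-alone sketch of the pattern with invented names (<string> and <vector>
// assumed included):
struct Registry
{
    std::vector<std::string> names;
};
Registry &GetRegistry()
{
    static Registry instance; // initialized on first call, so it is ready
                              // regardless of static-init order elsewhere
    return instance;
}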
// add a results checker for a benchmark
void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
check_patterns.emplace_back(entry_pattern, fn);
void ResultsChecker::Add(const std::string &entry_pattern, ResultsCheckFn fn)
{
check_patterns.emplace_back(entry_pattern, fn);
}
// check the results of all subscribed benchmarks
void ResultsChecker::CheckResults(std::stringstream& output) {
// first reset the stream to the start
{
auto start = std::stringstream::pos_type(0);
// clear before calling tellg()
output.clear();
// seek to zero only when needed
if (output.tellg() > start) output.seekg(start);
// and just in case
output.clear();
}
// now go over every line and publish it to the ResultsChecker
std::string line;
bool on_first = true;
while (output.eof() == false) {
CHECK(output.good());
std::getline(output, line);
if (on_first) {
SetHeader_(line); // this is important
on_first = false;
continue;
void ResultsChecker::CheckResults(std::stringstream &output)
{
// first reset the stream to the start
{
auto start = std::stringstream::pos_type(0);
// clear before calling tellg()
output.clear();
// seek to zero only when needed
if (output.tellg() > start)
output.seekg(start);
// and just in case
output.clear();
}
SetValues_(line);
}
// finally we can call the subscribed check functions
for (const auto& p : check_patterns) {
VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for (const auto& r : results) {
if (!p.regex->Match(r.name)) {
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue;
} else {
VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
}
VLOG(1) << "Checking results of " << r.name << ": ... \n";
p.fn(r);
VLOG(1) << "Checking results of " << r.name << ": OK.\n";
// now go over every line and publish it to the ResultsChecker
std::string line;
bool on_first = true;
while (output.eof() == false)
{
CHECK(output.good());
std::getline(output, line);
if (on_first)
{
SetHeader_(line); // this is important
on_first = false;
continue;
}
SetValues_(line);
}
// finally we can call the subscribed check functions
for (const auto &p : check_patterns)
{
VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for (const auto &r : results)
{
if (!p.regex->Match(r.name))
{
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue;
}
else
{
VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
}
VLOG(1) << "Checking results of " << r.name << ": ... \n";
p.fn(r);
VLOG(1) << "Checking results of " << r.name << ": OK.\n";
}
}
}
}
// prepare for the names in this header
void ResultsChecker::SetHeader_(const std::string& csv_header) {
field_names = SplitCsv_(csv_header);
void ResultsChecker::SetHeader_(const std::string &csv_header)
{
field_names = SplitCsv_(csv_header);
}
// set the values for a benchmark
void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
if (entry_csv_line.empty()) return; // some lines are empty
CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto& entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i) {
entry.values[field_names[i]] = vals[i];
}
void ResultsChecker::SetValues_(const std::string &entry_csv_line)
{
if (entry_csv_line.empty())
return; // some lines are empty
CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto &entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i)
{
entry.values[field_names[i]] = vals[i];
}
}
// a quick'n'dirty csv splitter (eliminating quotes)
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
std::vector<std::string> out;
if (line.empty()) return out;
if (!field_names.empty()) out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while (pos != line.npos) {
CHECK(curr > 0);
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string &line)
{
std::vector<std::string> out;
if (line.empty())
return out;
if (!field_names.empty())
out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while (pos != line.npos)
{
CHECK(curr > 0);
if (line[prev] == '"')
++prev;
if (line[curr - 1] == '"')
--curr;
out.push_back(line.substr(prev, curr - prev));
prev = pos + 1;
pos = line.find_first_of(',', pos + 1);
curr = pos;
}
curr = line.size();
if (line[prev] == '"')
++prev;
if (line[curr - 1] == '"')
--curr;
out.push_back(line.substr(prev, curr - prev));
prev = pos + 1;
pos = line.find_first_of(',', pos + 1);
curr = pos;
}
curr = line.size();
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
out.push_back(line.substr(prev, curr - prev));
return out;
return out;
}
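// A hedged worked example of what the splitter yields for one CSV row (numbers invented):
//   Input:  "BM_basic",1000,2.15,2.10,ns,,,
//   Output: {"BM_basic", "1000", "2.15", "2.10", "ns", "", "", ""}
// The surrounding quotes on the first field are stripped and the empty trailing
// fields are preserved, so vals.size() still matches the header width.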
} // end namespace internal
} // end namespace internal
size_t AddChecker(const char* bm_name, ResultsCheckFn fn) {
auto& rc = internal::GetResultsChecker();
rc.Add(bm_name, fn);
return rc.results.size();
size_t AddChecker(const char *bm_name, ResultsCheckFn fn)
{
auto &rc = internal::GetResultsChecker();
rc.Add(bm_name, fn);
return rc.results.size();
}
int Results::NumThreads() const {
auto pos = name.find("/threads:");
if (pos == name.npos) return 1;
auto end = name.find('/', pos + 9);
std::stringstream ss;
ss << name.substr(pos + 9, end);
int num = 1;
ss >> num;
CHECK(!ss.fail());
return num;
int Results::NumThreads() const
{
auto pos = name.find("/threads:");
if (pos == name.npos)
return 1;
auto end = name.find('/', pos + 9);
std::stringstream ss;
ss << name.substr(pos + 9, end);
int num = 1;
ss >> num;
CHECK(!ss.fail());
return num;
}
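// A hedged worked example of the name parsing (benchmark names invented):
//   "BM_foo/threads:4"           -> 4   ("/threads:" found, digits parsed from the suffix)
//   "BM_foo/threads:8/real_time" -> 8   (stream extraction stops at the next '/')
//   "BM_foo/64"                  -> 1   (no "/threads:" segment, default of one thread)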
double Results::NumIterations() const {
return GetAs<double>("iterations");
double Results::NumIterations() const
{
return GetAs<double>("iterations");
}
double Results::GetTime(BenchmarkTime which) const {
CHECK(which == kCpuTime || which == kRealTime);
const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str);
auto unit = Get("time_unit");
CHECK(unit);
if (*unit == "ns") {
return val * 1.e-9;
} else if (*unit == "us") {
return val * 1.e-6;
} else if (*unit == "ms") {
return val * 1.e-3;
} else if (*unit == "s") {
return val;
} else {
CHECK(1 == 0) << "unknown time unit: " << *unit;
return 0;
}
double Results::GetTime(BenchmarkTime which) const
{
CHECK(which == kCpuTime || which == kRealTime);
const char *which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str);
auto unit = Get("time_unit");
CHECK(unit);
if (*unit == "ns")
{
return val * 1.e-9;
}
else if (*unit == "us")
{
return val * 1.e-6;
}
else if (*unit == "ms")
{
return val * 1.e-3;
}
else if (*unit == "s")
{
return val;
}
else
{
CHECK(1 == 0) << "unknown time unit: " << *unit;
return 0;
}
}
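// A hedged arithmetic example of the unit conversion (numbers invented):
//   cpu_time  = 1500, time_unit = "ns" -> 1500 * 1.e-9 = 1.5e-6 s
//   real_time = 2.5,  time_unit = "ms" -> 2.5  * 1.e-3 = 2.5e-3 s
// Any other unit string trips the CHECK(1 == 0) branch and reports
// "unknown time unit".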
// ========================================================================= //
@ -337,40 +395,43 @@ double Results::GetTime(BenchmarkTime which) const {
// ========================================================================= //
TestCase::TestCase(std::string re, int rule)
: regex_str(std::move(re)),
match_rule(rule),
substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>()) {
std::string err_str;
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
<< "\""
<< "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str;
: regex_str(std::move(re)), match_rule(rule), substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>())
{
std::string err_str;
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex << "\""
<< "\n originally \"" << regex_str << "\""
<< "\n got error: " << err_str;
}
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) {
auto& L = internal::GetTestCaseList(ID);
L.insert(L.end(), il);
return 0;
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il)
{
auto &L = internal::GetTestCaseList(ID);
L.insert(L.end(), il);
return 0;
}
int SetSubstitutions(
std::initializer_list<std::pair<std::string, std::string>> il) {
auto& subs = internal::GetSubstitutions();
for (auto KV : il) {
bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second);
for (auto& EKV : subs) {
if (EKV.first == KV.first) {
EKV.second = std::move(KV.second);
exists = true;
break;
}
int SetSubstitutions(std::initializer_list<std::pair<std::string, std::string>> il)
{
auto &subs = internal::GetSubstitutions();
for (auto KV : il)
{
bool exists = false;
KV.second = internal::PerformSubstitutions(KV.second);
for (auto &EKV : subs)
{
if (EKV.first == KV.first)
{
EKV.second = std::move(KV.second);
exists = true;
break;
}
}
if (!exists)
subs.push_back(std::move(KV));
}
if (!exists) subs.push_back(std::move(KV));
}
return 0;
return 0;
}
// Disable deprecated warnings temporarily because we need to reference
@ -379,137 +440,145 @@ int SetSubstitutions(
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest {
const char* name;
std::vector<TestCase>& output_cases;
std::vector<TestCase>& error_cases;
benchmark::BenchmarkReporter& reporter;
std::stringstream out_stream;
std::stringstream err_stream;
void RunOutputTests(int argc, char *argv[])
{
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
struct ReporterTest
{
const char *name;
std::vector<TestCase> &output_cases;
std::vector<TestCase> &error_cases;
benchmark::BenchmarkReporter &reporter;
std::stringstream out_stream;
std::stringstream err_stream;
ReporterTest(const char* n, std::vector<TestCase>& out_tc,
std::vector<TestCase>& err_tc,
benchmark::BenchmarkReporter& br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) {
reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream);
ReporterTest(const char *n, std::vector<TestCase> &out_tc, std::vector<TestCase> &err_tc,
benchmark::BenchmarkReporter &br)
: name(n), output_cases(out_tc), error_cases(err_tc), reporter(br)
{
reporter.SetOutputStream(&out_stream);
reporter.SetErrorStream(&err_stream);
}
} TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), CSVR},
};
// Create the test reporter and run the benchmarks.
std::cout << "Running benchmarks...\n";
internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto &rep_test : TestCases)
{
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n";
std::cerr << rep_test.err_stream.str();
std::cout << rep_test.out_stream.str();
internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
internal::CheckCases(rep_test.output_cases, rep_test.out_stream);
std::cout << "\n";
}
} TestCases[] = {
{"ConsoleReporter", GetTestCaseList(TC_ConsoleOut),
GetTestCaseList(TC_ConsoleErr), CR},
{"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr),
JR},
{"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr),
CSVR},
};
// Create the test reporter and run the benchmarks.
std::cout << "Running benchmarks...\n";
internal::TestReporter test_rep({&CR, &JR, &CSVR});
benchmark::RunSpecifiedBenchmarks(&test_rep);
for (auto& rep_test : TestCases) {
std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
std::string banner(msg.size() - 1, '-');
std::cout << banner << msg << banner << "\n";
std::cerr << rep_test.err_stream.str();
std::cout << rep_test.out_stream.str();
internal::CheckCases(rep_test.error_cases, rep_test.err_stream);
internal::CheckCases(rep_test.output_cases, rep_test.out_stream);
std::cout << "\n";
}
// now that we know the output is as expected, we can dispatch
// the checks to subscribees.
auto& csv = TestCases[2];
// would use == but gcc spits a warning
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
// now that we know the output is as expected, we can dispatch
// the checks to subscribees.
auto &csv = TestCases[2];
// would use == but gcc spits a warning
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
int SubstrCnt(const std::string& haystack, const std::string& pat) {
if (pat.length() == 0) return 0;
int count = 0;
for (size_t offset = haystack.find(pat); offset != std::string::npos;
offset = haystack.find(pat, offset + pat.length()))
++count;
return count;
int SubstrCnt(const std::string &haystack, const std::string &pat)
{
if (pat.length() == 0)
return 0;
int count = 0;
for (size_t offset = haystack.find(pat); offset != std::string::npos;
offset = haystack.find(pat, offset + pat.length()))
++count;
return count;
}
static char ToHex(int ch) {
return ch < 10 ? static_cast<char>('0' + ch)
: static_cast<char>('a' + (ch - 10));
static char ToHex(int ch)
{
return ch < 10 ? static_cast<char>('0' + ch) : static_cast<char>('a' + (ch - 10));
}
static char RandomHexChar() {
static std::mt19937 rd{std::random_device{}()};
static std::uniform_int_distribution<int> mrand{0, 15};
return ToHex(mrand(rd));
static char RandomHexChar()
{
static std::mt19937 rd{std::random_device{}()};
static std::uniform_int_distribution<int> mrand{0, 15};
return ToHex(mrand(rd));
}
static std::string GetRandomFileName() {
std::string model = "test.%%%%%%";
for (auto & ch : model) {
if (ch == '%')
ch = RandomHexChar();
}
return model;
static std::string GetRandomFileName()
{
std::string model = "test.%%%%%%";
for (auto &ch : model)
{
if (ch == '%')
ch = RandomHexChar();
}
return model;
}
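// A hedged example of the generated names (the hex digits are random, so the
// value below is only illustrative):
//   model "test.%%%%%%" -> e.g. "test.3fa91c"
// Each '%' is replaced by one random hex digit, giving 16^6 candidate names per attempt.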
static bool FileExists(std::string const& name) {
std::ifstream in(name.c_str());
return in.good();
static bool FileExists(std::string const &name)
{
std::ifstream in(name.c_str());
return in.good();
}
static std::string GetTempFileName() {
// This function attempts to avoid race conditions where two tests
// create the same file at the same time. However, it still introduces races
// similar to tmpnam.
int retries = 3;
while (--retries) {
std::string name = GetRandomFileName();
if (!FileExists(name))
return name;
}
std::cerr << "Failed to create unique temporary file name" << std::endl;
std::abort();
static std::string GetTempFileName()
{
// This function attempts to avoid race conditions where two tests
// create the same file at the same time. However, it still introduces races
// similar to tmpnam.
int retries = 3;
while (--retries)
{
std::string name = GetRandomFileName();
if (!FileExists(name))
return name;
}
std::cerr << "Failed to create unique temporary file name" << std::endl;
std::abort();
}
std::string GetFileReporterOutput(int argc, char* argv[]) {
std::vector<char*> new_argv(argv, argv + argc);
assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
std::string GetFileReporterOutput(int argc, char *argv[])
{
std::vector<char *> new_argv(argv, argv + argc);
assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
std::string tmp_file_name = GetTempFileName();
std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';
std::string tmp_file_name = GetTempFileName();
std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';
std::string tmp = "--benchmark_out=";
tmp += tmp_file_name;
new_argv.emplace_back(const_cast<char*>(tmp.c_str()));
std::string tmp = "--benchmark_out=";
tmp += tmp_file_name;
new_argv.emplace_back(const_cast<char *>(tmp.c_str()));
argc = int(new_argv.size());
argc = int(new_argv.size());
benchmark::Initialize(&argc, new_argv.data());
benchmark::RunSpecifiedBenchmarks();
benchmark::Initialize(&argc, new_argv.data());
benchmark::RunSpecifiedBenchmarks();
// Read the output back from the file, and delete the file.
std::ifstream tmp_stream(tmp_file_name);
std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)),
std::istreambuf_iterator<char>());
std::remove(tmp_file_name.c_str());
// Read the output back from the file, and delete the file.
std::ifstream tmp_stream(tmp_file_name);
std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)), std::istreambuf_iterator<char>());
std::remove(tmp_file_name.c_str());
return output;
return output;
}

View File

@ -3,33 +3,41 @@
#include <cassert>
#include <vector>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
namespace
{
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
class TestReporter : public benchmark::ConsoleReporter
{
public:
virtual void ReportRuns(const std::vector<Run> &report)
{
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
std::vector<Run> all_runs_;
std::vector<Run> all_runs_;
};
struct TestCase {
std::string name;
const char* label;
// Note: not explicit as we rely on it being converted through ADD_CASES.
TestCase(const char* xname) : TestCase(xname, nullptr) {}
TestCase(const char* xname, const char* xlabel)
: name(xname), label(xlabel) {}
struct TestCase
{
std::string name;
const char *label;
// Note: not explicit as we rely on it being converted through ADD_CASES.
TestCase(const char *xname) : TestCase(xname, nullptr)
{
}
TestCase(const char *xname, const char *xlabel) : name(xname), label(xlabel)
{
}
typedef benchmark::BenchmarkReporter::Run Run;
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
// clang-format off
void CheckRun(Run const &run) const
{
// clang-format off
CHECK(name == run.benchmark_name()) << "expected " << name << " got "
<< run.benchmark_name();
if (label) {
@ -38,37 +46,40 @@ struct TestCase {
} else {
CHECK(run.report_label == "");
}
// clang-format on
}
// clang-format on
}
};
std::vector<TestCase> ExpectedResults;
int AddCases(std::initializer_list<TestCase> const& v) {
for (auto N : v) {
ExpectedResults.push_back(N);
}
return 0;
int AddCases(std::initializer_list<TestCase> const &v)
{
for (auto N : v)
{
ExpectedResults.push_back(N);
}
return 0;
}
#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases({__VA_ARGS__})
} // end namespace
} // end namespace
typedef benchmark::internal::Benchmark* ReturnVal;
typedef benchmark::internal::Benchmark *ReturnVal;
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) {
for (auto _ : state) {
}
void BM_function(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark(
"BM_function_manual_registration", BM_function);
ReturnVal dummy = benchmark::RegisterBenchmark("BM_function_manual_registration", BM_function);
ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//
@ -78,107 +89,118 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
//----------------------------------------------------------------------------//
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State& st, const char* label) {
for (auto _ : st) {
}
st.SetLabel(label);
void BM_extra_args(benchmark::State &st, const char *label)
{
for (auto _ : st)
{
}
st.SetLabel(label);
}
int RegisterFromFunction() {
std::pair<const char*, const char*> cases[] = {
{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
for (auto const& c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0;
int RegisterFromFunction()
{
std::pair<const char *, const char *> cases[] = {{"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}};
for (auto const &c : cases)
benchmark::RegisterBenchmark(c.first, &BM_extra_args, c.second);
return 0;
}
int dummy2 = RegisterFromFunction();
ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
#endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
//----------------------------------------------------------------------------//
// Test RegisterBenchmark with different callable types
//----------------------------------------------------------------------------//
struct CustomFixture {
void operator()(benchmark::State& st) {
for (auto _ : st) {
struct CustomFixture
{
void operator()(benchmark::State &st)
{
for (auto _ : st)
{
}
}
}
};
void TestRegistrationAtRuntime() {
void TestRegistrationAtRuntime()
{
#ifdef BENCHMARK_HAS_CXX11
{
CustomFixture fx;
benchmark::RegisterBenchmark("custom_fixture", fx);
AddCases({"custom_fixture"});
}
{
CustomFixture fx;
benchmark::RegisterBenchmark("custom_fixture", fx);
AddCases({"custom_fixture"});
}
#endif
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
{
const char* x = "42";
auto capturing_lam = [=](benchmark::State& st) {
for (auto _ : st) {
}
st.SetLabel(x);
};
benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
AddCases({{"lambda_benchmark", x}});
}
{
const char *x = "42";
auto capturing_lam = [=](benchmark::State &st) {
for (auto _ : st)
{
}
st.SetLabel(x);
};
benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
AddCases({{"lambda_benchmark", x}});
}
#endif
}
// Test that all benchmarks, registered either during static init or at runtime,
// are run and the results are passed to the reporter.
void RunTestOne() {
TestRegistrationAtRuntime();
void RunTestOne()
{
TestRegistrationAtRuntime();
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
}
// Test that ClearRegisteredBenchmarks() clears all previously registered
// benchmarks.
// Also test that new benchmarks can be registered and run afterwards.
void RunTestTwo() {
assert(ExpectedResults.size() != 0 &&
"must have at least one registered benchmark");
ExpectedResults.clear();
benchmark::ClearRegisteredBenchmarks();
void RunTestTwo()
{
assert(ExpectedResults.size() != 0 && "must have at least one registered benchmark");
ExpectedResults.clear();
benchmark::ClearRegisteredBenchmarks();
TestReporter test_reporter;
size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
assert(num_ran == 0);
assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
TestReporter test_reporter;
size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
assert(num_ran == 0);
assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
TestRegistrationAtRuntime();
num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
assert(num_ran == ExpectedResults.size());
TestRegistrationAtRuntime();
num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
assert(num_ran == ExpectedResults.size());
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
}
int main(int argc, char* argv[]) {
benchmark::Initialize(&argc, argv);
int main(int argc, char *argv[])
{
benchmark::Initialize(&argc, argv);
RunTestOne();
RunTestTwo();
RunTestOne();
RunTestTwo();
}

View File

@ -10,30 +10,32 @@
// reporter in the presence of ReportAggregatesOnly().
// We do not care about console output, the normal tests check that already.
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
void BM_SummaryRepeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
int main(int argc, char* argv[]) {
const std::string output = GetFileReporterOutput(argc, argv);
int main(int argc, char *argv[])
{
const std::string output = GetFileReporterOutput(argc, argv);
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
std::cout << "Precondition mismatch. Expected to only find three "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}
if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != 1)
{
std::cout << "Precondition mismatch. Expected to only find three "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}
return 0;
return 0;
}

View File

@ -9,53 +9,49 @@
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
static int AddContextCases() {
AddCases(TC_ConsoleErr,
{
{"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut,
{{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}});
auto const& Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches;
if (!Caches.empty()) {
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
for (size_t I = 0; I < Caches.size(); ++I) {
std::string num_caches_str =
Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
AddCases(TC_ConsoleErr,
{{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next},
{"\"size\": %int,$", MR_Next},
{"\"num_sharing\": %int$", MR_Next},
{"}[,]{0,1}$", MR_Next}});
}
AddCases(TC_JSONOut, {{"],$"}});
auto const& LoadAvg = Info.load_avg;
if (!LoadAvg.empty()) {
AddCases(TC_ConsoleErr,
{{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0;
ADD_CASES(TC_ConsoleOut,
{{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, {"^[-]+$", MR_Next}});
static int AddContextCases()
{
AddCases(TC_ConsoleErr, {
{"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut, {{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}});
auto const &Info = benchmark::CPUInfo::Get();
auto const &Caches = Info.caches;
if (!Caches.empty())
{
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
for (size_t I = 0; I < Caches.size(); ++I)
{
std::string num_caches_str = Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
AddCases(TC_ConsoleErr, {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next},
{"\"size\": %int,$", MR_Next},
{"\"num_sharing\": %int$", MR_Next},
{"}[,]{0,1}$", MR_Next}});
}
AddCases(TC_JSONOut, {{"],$"}});
auto const &LoadAvg = Info.load_avg;
if (!LoadAvg.empty())
{
AddCases(TC_ConsoleErr, {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0;
}
int dummy_register = AddContextCases();
ADD_CASES(TC_CSVOut, {{"%csv_header"}});
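// AddContextCases() runs before main() because its return value initializes a
// namespace-scope int; a hedged sketch of the same registration trick with
// invented names:
static int RegisterMyCases()
{
    AddCases(TC_ConsoleOut, {{"^BM_mine %console_report$"}});
    return 0;
}
static int dummy_my_cases = RegisterMyCases(); // forces the call during static initialization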
@ -64,9 +60,11 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //
void BM_basic(benchmark::State& state) {
for (auto _ : state) {
}
void BM_basic(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_basic);
@ -88,12 +86,14 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //
void BM_bytes_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetBytesProcessed(1);
void BM_bytes_per_second(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
@ -117,12 +117,14 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //
void BM_items_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetItemsProcessed(1);
void BM_items_per_second(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
@ -146,10 +148,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //
void BM_label(benchmark::State& state) {
for (auto _ : state) {
}
state.SetLabel("some label");
void BM_label(benchmark::State &state)
{
for (auto _ : state)
{
}
state.SetLabel("some label");
}
BENCHMARK(BM_label);
@ -173,10 +177,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
void BM_error(benchmark::State& state) {
state.SkipWithError("message");
for (auto _ : state) {
}
void BM_error(benchmark::State &state)
{
state.SkipWithError("message");
for (auto _ : state)
{
}
}
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
@ -196,9 +202,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// //
// ========================================================================= //
void BM_no_arg_name(benchmark::State& state) {
for (auto _ : state) {
}
void BM_no_arg_name(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
@ -214,9 +222,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ------------------------ Testing Arg Name Output ----------------------- //
// ========================================================================= //
void BM_arg_name(benchmark::State& state) {
for (auto _ : state) {
}
void BM_arg_name(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
@ -232,68 +242,70 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //
void BM_arg_names(benchmark::State& state) {
for (auto _ : state) {
}
void BM_arg_names(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //
void BM_BigArgs(benchmark::State& state) {
for (auto _ : state) {
}
void BM_BigArgs(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
{"^BM_BigArgs/2147483648 %console_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, {"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(state.range(0));
void BM_Complexity_O1(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
{"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
{"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"}, {"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"}, {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
for (auto _ : state) {
}
void BM_Repeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
// need two repetitions min to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -334,13 +346,12 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, mean and median are the same, so let's repeat..
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -388,14 +399,13 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// median differs between even/odd number of repetitions, so just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -451,9 +461,11 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
for (auto _ : state) {
}
void BM_RepeatOnce(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
@ -466,40 +478,39 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
void BM_SummaryRepeat(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
@ -508,106 +519,99 @@ ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
// Test that non-aggregate data is not displayed.
// NOTE: this test is kinda bad. We are only testing the display output.
// But we don't check that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State& state) {
for (auto _ : state) {
}
void BM_SummaryDisplay(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) {
for (auto _ : state) {
}
void BM_RepeatTimeUnit(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_RepeatTimeUnit)
->Repetitions(3)
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
ADD_CASES(
TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
BENCHMARK(BM_RepeatTimeUnit)->Repetitions(3)->ReportAggregatesOnly()->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}});
ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
// ========================================================================= //
// -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= //
const auto UserStatistics = [](const std::vector<double>& v) {
return v.back();
};
void BM_UserStats(benchmark::State& state) {
for (auto _ : state) {
state.SetIterationTime(150 / 10e8);
}
const auto UserStatistics = [](const std::vector<double> &v) { return v.back(); };
void BM_UserStats(benchmark::State &state)
{
for (auto _ : state)
{
state.SetIterationTime(150 / 10e8);
}
}
// clang-format off
BENCHMARK(BM_UserStats)
@ -633,82 +637,71 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES(
TC_JSONOut,
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
@ -733,10 +726,12 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
// -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= //
void BM_CSV_Format(benchmark::State& state) {
state.SkipWithError("\"freedom\"");
for (auto _ : state) {
}
void BM_CSV_Format(benchmark::State &state)
{
state.SkipWithError("\"freedom\"");
for (auto _ : state)
{
}
}
BENCHMARK(BM_CSV_Format);
ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
@ -745,4 +740,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -3,109 +3,135 @@
#include <cassert>
#include <vector>
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"
namespace {
namespace
{
class TestReporter : public benchmark::ConsoleReporter {
public:
virtual bool ReportContext(const Context& context) {
return ConsoleReporter::ReportContext(context);
};
class TestReporter : public benchmark::ConsoleReporter
{
public:
virtual bool ReportContext(const Context &context)
{
return ConsoleReporter::ReportContext(context);
};
virtual void ReportRuns(const std::vector<Run>& report) {
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
virtual void ReportRuns(const std::vector<Run> &report)
{
all_runs_.insert(all_runs_.end(), begin(report), end(report));
ConsoleReporter::ReportRuns(report);
}
TestReporter() {}
virtual ~TestReporter() {}
TestReporter()
{
}
virtual ~TestReporter()
{
}
mutable std::vector<Run> all_runs_;
mutable std::vector<Run> all_runs_;
};
struct TestCase {
std::string name;
bool error_occurred;
std::string error_message;
struct TestCase
{
std::string name;
bool error_occurred;
std::string error_message;
typedef benchmark::BenchmarkReporter::Run Run;
typedef benchmark::BenchmarkReporter::Run Run;
void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name())
<< "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred) {
// CHECK(run.iterations == 0);
} else {
CHECK(run.iterations != 0);
void CheckRun(Run const &run) const
{
CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred)
{
// CHECK(run.iterations == 0);
}
else
{
CHECK(run.iterations != 0);
}
}
}
};
std::vector<TestCase> ExpectedResults;
int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
for (auto TC : v) {
TC.name = base_name + TC.name;
ExpectedResults.push_back(std::move(TC));
}
return 0;
int AddCases(const char *base_name, std::initializer_list<TestCase> const &v)
{
for (auto TC : v)
{
TC.name = base_name + TC.name;
ExpectedResults.push_back(std::move(TC));
}
return 0;
}
#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
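// ADD_CASES expands to a uniquely named file-scope int (dummy<line number>),
// so every AddCases(...) call runs during static initialization and fills
// ExpectedResults before main() runs the benchmarks and replays CheckRun
// against the collected results.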
} // end namespace
} // end namespace
void BM_error_no_running(benchmark::State& state) {
state.SkipWithError("error message");
void BM_error_no_running(benchmark::State &state)
{
state.SkipWithError("error message");
}
BENCHMARK(BM_error_no_running);
ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
void BM_error_before_running(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunning()) {
assert(false);
}
void BM_error_before_running(benchmark::State &state)
{
state.SkipWithError("error message");
while (state.KeepRunning())
{
assert(false);
}
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});
void BM_error_before_running_batch(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunningBatch(17)) {
assert(false);
}
void BM_error_before_running_batch(benchmark::State &state)
{
state.SkipWithError("error message");
while (state.KeepRunningBatch(17))
{
assert(false);
}
}
BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});
void BM_error_before_running_range_for(benchmark::State& state) {
state.SkipWithError("error message");
for (auto _ : state) {
assert(false);
}
void BM_error_before_running_range_for(benchmark::State &state)
{
state.SkipWithError("error message");
for (auto _ : state)
{
assert(false);
}
}
BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});
void BM_error_during_running(benchmark::State& state) {
int first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
} else {
state.PauseTiming();
state.ResumeTiming();
void BM_error_during_running(benchmark::State &state)
{
int first_iter = true;
while (state.KeepRunning())
{
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
{
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
}
else
{
state.PauseTiming();
state.ResumeTiming();
}
}
}
}
BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
@ -117,33 +143,37 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
void BM_error_during_running_ranged_for(benchmark::State& state) {
assert(state.max_iterations > 3 && "test requires at least a few iterations");
int first_iter = true;
// NOTE: Users should not write the for loop explicitly.
for (auto It = state.begin(), End = state.end(); It != End; ++It) {
if (state.range(0) == 1) {
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
// Test the unfortunate but documented behavior that the ranged-for loop
// doesn't automatically terminate when SkipWithError is set.
assert(++It != End);
break; // Required behavior
void BM_error_during_running_ranged_for(benchmark::State &state)
{
assert(state.max_iterations > 3 && "test requires at least a few iterations");
int first_iter = true;
// NOTE: Users should not write the for loop explicitly.
for (auto It = state.begin(), End = state.end(); It != End; ++It)
{
if (state.range(0) == 1)
{
assert(first_iter);
first_iter = false;
state.SkipWithError("error message");
// Test the unfortunate but documented behavior that the ranged-for loop
// doesn't automatically terminate when SkipWithError is set.
assert(++It != End);
break; // Required behavior
}
}
}
}
BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
ADD_CASES("BM_error_during_running_ranged_for",
{{"/1/iterations:5", true, "error message"},
{"/2/iterations:5", false, ""}});
{{"/1/iterations:5", true, "error message"}, {"/2/iterations:5", false, ""}});
void BM_error_after_running(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
if (state.thread_index <= (state.threads / 2))
state.SkipWithError("error message");
void BM_error_after_running(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
if (state.thread_index <= (state.threads / 2))
state.SkipWithError("error message");
}
BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
@ -151,19 +181,24 @@ ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
{"/threads:4", true, "error message"},
{"/threads:8", true, "error message"}});
void BM_error_while_paused(benchmark::State& state) {
bool first_iter = true;
while (state.KeepRunning()) {
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
assert(first_iter);
first_iter = false;
state.PauseTiming();
state.SkipWithError("error message");
} else {
state.PauseTiming();
state.ResumeTiming();
void BM_error_while_paused(benchmark::State &state)
{
bool first_iter = true;
while (state.KeepRunning())
{
if (state.range(0) == 1 && state.thread_index <= (state.threads / 2))
{
assert(first_iter);
first_iter = false;
state.PauseTiming();
state.SkipWithError("error message");
}
else
{
state.PauseTiming();
state.ResumeTiming();
}
}
}
}
BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
@ -175,21 +210,23 @@ ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
{"/2/threads:4", false, ""},
{"/2/threads:8", false, ""}});
int main(int argc, char* argv[]) {
benchmark::Initialize(&argc, argv);
int main(int argc, char *argv[])
{
benchmark::Initialize(&argc, argv);
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
TestReporter test_reporter;
benchmark::RunSpecifiedBenchmarks(&test_reporter);
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
typedef benchmark::BenchmarkReporter::Run Run;
auto EB = ExpectedResults.begin();
for (Run const& run : test_reporter.all_runs_) {
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
for (Run const &run : test_reporter.all_runs_)
{
assert(EB != ExpectedResults.end());
EB->CheckRun(run);
++EB;
}
assert(EB == ExpectedResults.end());
return 0;
return 0;
}

View File

@ -15,54 +15,58 @@ extern "C" {
using benchmark::State;
// CHECK-LABEL: test_for_auto_loop:
extern "C" int test_for_auto_loop() {
State& S = GetState();
int x = 42;
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK-NEXT: testq %rbx, %rbx
// CHECK-NEXT: je [[LOOP_END:.*]]
extern "C" int test_for_auto_loop()
{
State &S = GetState();
int x = 42;
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK-NEXT: testq %rbx, %rbx
// CHECK-NEXT: je [[LOOP_END:.*]]
for (auto _ : S) {
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx
// CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
// CHECK-NEXT: jne .L[[LOOP_HEAD]]
benchmark::DoNotOptimize(x);
}
// CHECK: [[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
for (auto _ : S)
{
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx
// CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}}
// CHECK-NEXT: jne .L[[LOOP_HEAD]]
benchmark::DoNotOptimize(x);
}
// CHECK: [[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
// CHECK: movl $101, %eax
// CHECK: ret
return 101;
// CHECK: movl $101, %eax
// CHECK: ret
return 101;
}
// CHECK-LABEL: test_while_loop:
extern "C" int test_while_loop() {
State& S = GetState();
int x = 42;
extern "C" int test_while_loop()
{
State &S = GetState();
int x = 42;
// CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
// CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
while (S.KeepRunning()) {
// CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
// CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
// CHECK: movq %[[IREG]], [[DEST:.*]]
benchmark::DoNotOptimize(x);
}
// CHECK-DAG: movq [[DEST]], %[[IREG]]
// CHECK-DAG: testq %[[IREG]], %[[IREG]]
// CHECK-DAG: jne .L[[LOOP_BODY]]
// CHECK-DAG: .L[[LOOP_HEADER]]:
// CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
// CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
while (S.KeepRunning())
{
// CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
// CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
// CHECK: movq %[[IREG]], [[DEST:.*]]
benchmark::DoNotOptimize(x);
}
// CHECK-DAG: movq [[DEST]], %[[IREG]]
// CHECK-DAG: testq %[[IREG]], %[[IREG]]
// CHECK-DAG: jne .L[[LOOP_BODY]]
// CHECK-DAG: .L[[LOOP_HEADER]]:
// CHECK: cmpb $0
// CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK: cmpb $0
// CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK: .L[[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
// CHECK: .L[[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv
// CHECK: movl $101, %eax
// CHECK: ret
return 101;
// CHECK: movl $101, %eax
// CHECK: ret
return 101;
}
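// What these FileCheck patterns pin down: both loop styles should lower to a
// tight counted loop (one decrement or increment plus a conditional branch),
// with State::StartKeepRunning() and FinishKeepRunning() each called once
// outside the loop rather than on every iteration.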

View File

@ -5,24 +5,27 @@
#include "../src/statistics.h"
#include "gtest/gtest.h"
namespace {
TEST(StatisticsTest, Mean) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
namespace
{
TEST(StatisticsTest, Mean)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
}
TEST(StatisticsTest, Median) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
TEST(StatisticsTest, Median)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
}
TEST(StatisticsTest, StdDev) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
1.151086443322134);
TEST(StatisticsTest, StdDev)
{
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), 1.151086443322134);
}
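// A hand check of the last expectation, assuming StatisticsStdDev is the
// Bessel-corrected sample standard deviation (the {1, 2, 3} case already
// implies this, since the population value would be sqrt(2/3), not 1.0):
//   data = {2.5, 2.4, 3.3, 4.2, 5.1}, mean = 17.5 / 5 = 3.5
//   squared deviations: 1.00 + 1.21 + 0.04 + 0.49 + 2.56 = 5.30
//   sample variance   : 5.30 / (5 - 1) = 1.325
//   std deviation     : sqrt(1.325) ≈ 1.151086443322134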
} // end namespace
} // end namespace

View File

@ -2,152 +2,153 @@
// statistics_test - Unit tests for src/statistics.cc
//===---------------------------------------------------------------------===//
#include "../src/string_util.h"
#include "../src/internal_macros.h"
#include "../src/string_util.h"
#include "gtest/gtest.h"
namespace {
TEST(StringUtilTest, stoul) {
{
size_t pos = 0;
EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
EXPECT_EQ(3ul, pos);
}
namespace
{
TEST(StringUtilTest, stoul)
{
{
size_t pos = 0;
EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
EXPECT_EQ(3ul, pos);
}
#if ULONG_MAX == 0xFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
EXPECT_EQ(10ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
EXPECT_EQ(10ul, pos);
}
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
EXPECT_EQ(20ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
EXPECT_EQ(20ul, pos);
}
#endif
{
size_t pos = 0;
EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
}
{
ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
}
#endif
}
TEST(StringUtilTest, stoi) {
{
size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
TEST(StringUtilTest, stoi){{size_t pos = 0;
EXPECT_EQ(0, benchmark::stoi("0", &pos));
EXPECT_EQ(1ul, pos);
} // namespace
{
size_t pos = 0;
EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
EXPECT_EQ(3ul, pos);
}
{
}
{
size_t pos = 0;
EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
EXPECT_EQ(4ul, pos);
}
{
}
{
size_t pos = 0;
EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
EXPECT_EQ(4ul, pos);
}
{
}
{
size_t pos = 0;
EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
EXPECT_EQ(4ul, pos);
}
{
}
{
size_t pos = 0;
EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
EXPECT_EQ(4ul, pos);
}
{
}
{
size_t pos = 0;
EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
EXPECT_EQ(4ul, pos);
}
{
}
{
size_t pos = 0;
EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
EXPECT_EQ(4ul, pos);
}
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
{
ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
}
}
#endif
}
TEST(StringUtilTest, stod) {
{
size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
/* Note: exactly representable as double */
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
EXPECT_EQ(8ul, pos);
}
TEST(StringUtilTest, stod)
{
{
size_t pos = 0;
EXPECT_EQ(0.0, benchmark::stod("0", &pos));
EXPECT_EQ(1ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
EXPECT_EQ(4ul, pos);
}
{
size_t pos = 0;
EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
EXPECT_EQ(3ul, pos);
}
{
size_t pos = 0;
/* Note: exactly representable as double */
EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
EXPECT_EQ(8ul, pos);
}
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
{
ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
}
{
ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
}
#endif
}
} // end namespace
} // end namespace

View File

@ -4,24 +4,30 @@
#include <cassert>
#include <memory>
template <typename T>
class MyFixture : public ::benchmark::Fixture {
public:
MyFixture() : data(0) {}
template <typename T> class MyFixture : public ::benchmark::Fixture
{
public:
MyFixture() : data(0)
{
}
T data;
T data;
};
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
for (auto _ : st) {
data += 1;
}
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st)
{
for (auto _ : st)
{
data += 1;
}
}
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
for (auto _ : st) {
data += 1.0;
}
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State &st)
{
for (auto _ : st)
{
data += 1.0;
}
}
BENCHMARK_REGISTER_F(MyFixture, Bar);
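// The two registration styles above differ only in when registration happens:
// BENCHMARK_TEMPLATE_F(MyFixture, Foo, int) defines the benchmark body and
// registers it in one step, while BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar,
// double) only defines it, so the explicit BENCHMARK_REGISTER_F call is what
// actually puts Bar on the run list.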

View File

@ -7,11 +7,10 @@
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES(
TC_ConsoleOut,
{
// keeping these lines long improves readability, so:
// clang-format off
ADD_CASES(TC_ConsoleOut,
{
// keeping these lines long improves readability, so:
// clang-format off
{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
{"^[-]+$", MR_Next},
@ -46,8 +45,8 @@ ADD_CASES(
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
// clang-format on
});
// clang-format on
});
ADD_CASES(TC_CSVOut, {{"%csv_header,"
"\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
@ -55,49 +54,51 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
void BM_Counters_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {1, bm::Counter::kAvgThreads}},
{"Bar", {2, bm::Counter::kAvgThreads}},
{"Baz", {4, bm::Counter::kAvgThreads}},
{"Bat", {8, bm::Counter::kAvgThreads}},
{"Frob", {16, bm::Counter::kAvgThreads}},
{"Lob", {32, bm::Counter::kAvgThreads}},
});
void BM_Counters_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {1, bm::Counter::kAvgThreads}},
{"Bar", {2, bm::Counter::kAvgThreads}},
{"Baz", {4, bm::Counter::kAvgThreads}},
{"Bat", {8, bm::Counter::kAvgThreads}},
{"Frob", {16, bm::Counter::kAvgThreads}},
{"Lob", {32, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
void CheckTabular(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
@ -105,134 +106,138 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
// -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= //
void BM_CounterRates_Tabular(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {1, bm::Counter::kAvgThreadsRate}},
{"Bar", {2, bm::Counter::kAvgThreadsRate}},
{"Baz", {4, bm::Counter::kAvgThreadsRate}},
{"Bat", {8, bm::Counter::kAvgThreadsRate}},
{"Frob", {16, bm::Counter::kAvgThreadsRate}},
{"Lob", {32, bm::Counter::kAvgThreadsRate}},
});
void BM_CounterRates_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {1, bm::Counter::kAvgThreadsRate}},
{"Bar", {2, bm::Counter::kAvgThreadsRate}},
{"Baz", {4, bm::Counter::kAvgThreadsRate}},
{"Bat", {8, bm::Counter::kAvgThreadsRate}},
{"Frob", {16, bm::Counter::kAvgThreadsRate}},
{"Lob", {32, bm::Counter::kAvgThreadsRate}},
});
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float,$", MR_Next},
{"\"Frob\": %float,$", MR_Next},
{"\"Lob\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) {
double t = e.DurationCPUTime();
CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
void CheckTabularRate(Results const &e)
{
double t = e.DurationCPUTime();
CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
&CheckTabularRate);
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", &CheckTabularRate);
// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //
// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {10, bm::Counter::kAvgThreads}},
{"Bar", {20, bm::Counter::kAvgThreads}},
{"Baz", {40, bm::Counter::kAvgThreads}},
});
void BM_CounterSet0_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {10, bm::Counter::kAvgThreads}},
{"Bar", {20, bm::Counter::kAvgThreads}},
{"Baz", {40, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
void CheckSet0(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
// again.
void BM_CounterSet1_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {15, bm::Counter::kAvgThreads}},
{"Bar", {25, bm::Counter::kAvgThreads}},
{"Baz", {45, bm::Counter::kAvgThreads}},
});
void BM_CounterSet1_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {15, bm::Counter::kAvgThreads}},
{"Bar", {25, bm::Counter::kAvgThreads}},
{"Baz", {45, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bar\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
void CheckSet1(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
@ -241,40 +246,42 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
// ========================================================================= //
// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {10, bm::Counter::kAvgThreads}},
{"Bat", {30, bm::Counter::kAvgThreads}},
{"Baz", {40, bm::Counter::kAvgThreads}},
});
void BM_CounterSet2_Tabular(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
{"Foo", {10, bm::Counter::kAvgThreads}},
{"Bat", {30, bm::Counter::kAvgThreads}},
{"Baz", {40, bm::Counter::kAvgThreads}},
});
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
{"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"Bat\": %float,$", MR_Next},
{"\"Baz\": %float,$", MR_Next},
{"\"Foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
void CheckSet2(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
@ -282,4 +289,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -22,15 +22,16 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_Simple(benchmark::State& state) {
for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations();
void BM_Counters_Simple(benchmark::State &state)
{
for (auto _ : state)
{
}
state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations();
}
BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
{"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -47,11 +48,12 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const& e) {
double its = e.NumIterations();
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
// check that the value of bar is within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
void CheckSimple(Results const &e)
{
double its = e.NumIterations();
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
// check that the value of bar is within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
@ -59,71 +61,73 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //
namespace {
namespace
{
int num_calls1 = 0;
}
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.counters["foo"] = 1;
state.counters["bar"] = ++num_calls1;
state.SetBytesProcessed(364);
state.SetItemsProcessed(150);
void BM_Counters_WithBytesAndItemsPSec(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
state.counters["foo"] = 1;
state.counters["bar"] = ++num_calls1;
state.SetBytesProcessed(364);
state.SetItemsProcessed(150);
}
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
"bar=%hrfloat bytes_per_second=%hrfloat/s "
"foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"bytes_per_second\": %float,$", MR_Next},
{"\"foo\": %float,$", MR_Next},
{"\"items_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
{"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"bytes_per_second\": %float,$", MR_Next},
{"\"foo\": %float,$", MR_Next},
{"\"items_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
"%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const& e) {
double t = e.DurationCPUTime(); // this (and not real time) is the time used
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
void CheckBytesAndItemsPSec(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
&CheckBytesAndItemsPSec);
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", &CheckBytesAndItemsPSec);
// ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= //
void BM_Counters_Rate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
void BM_Counters_Rate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
BENCHMARK(BM_Counters_Rate);
ADD_CASES(
TC_ConsoleOut,
{{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
{"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -140,11 +144,12 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const& e) {
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
void CheckRate(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
@ -152,18 +157,19 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ----------------------- Inverted Counters Output ------------------------ //
// ========================================================================= //
void BM_Invert(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
void BM_Invert(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert};
}
BENCHMARK(BM_Invert);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
{"\"run_name\": \"BM_Invert\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
@ -180,9 +186,10 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvert(Results const& e) {
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
void CheckInvert(Results const &e)
{
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
@ -191,43 +198,42 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
// -------------------------- //
// ========================================================================= //
void BM_Counters_InvertedRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] =
bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["bar"] =
bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
void BM_Counters_InvertedRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert};
state.counters["bar"] = bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert};
}
BENCHMARK(BM_Counters_InvertedRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report "
"bar=%hrfloats foo=%hrfloats$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_InvertedRate\",$"},
{"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_InvertedRate\",$"},
{"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckInvertedRate(Results const& e) {
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
void CheckInvertedRate(Results const &e)
{
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
@ -235,37 +241,37 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate);
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_Threads(benchmark::State& state) {
for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = 2;
void BM_Counters_Threads(benchmark::State &state)
{
for (auto _ : state)
{
}
state.counters["foo"] = 1;
state.counters["bar"] = 2;
}
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
void CheckThreads(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
@ -273,209 +279,207 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
void BM_Counters_AvgThreads(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
}
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
"%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) {
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
void CheckAvgThreads(Results const &e)
{
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
&CheckAvgThreads);
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", &CheckAvgThreads);
// ========================================================================= //
// ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
void BM_Counters_AvgThreadsRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
}
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
{"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
"threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) {
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
void CheckAvgThreadsRate(Results const &e)
{
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
&CheckAvgThreadsRate);
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", &CheckAvgThreadsRate);
// ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= //
void BM_Counters_IterationInvariant(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
void BM_Counters_IterationInvariant(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
{"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) {
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
void CheckIterationInvariant(Results const &e)
{
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
&CheckIterationInvariant);
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", &CheckIterationInvariant);
// ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= //
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] =
bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
void BM_Counters_kIsIterationInvariantRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
{"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
{"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const& e) {
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
void CheckIsIterationInvariantRate(Results const &e)
{
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
&CheckIsIterationInvariantRate);
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", &CheckIsIterationInvariantRate);
// ========================================================================= //
// ------------------- AvgIterations Counters Output ------------------ //
// ========================================================================= //
void BM_Counters_AvgIterations(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
void BM_Counters_AvgIterations(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
"bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_AvgIterations\",$"},
{"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
{"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const& e) {
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
void CheckAvgIterations(Results const &e)
{
double its = e.NumIterations();
// check that the values are within 0.1% of the expected value
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
@ -483,49 +487,52 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
// ----------------- AvgIterationsRate Counters Output ---------------- //
// ========================================================================= //
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
state.counters["bar"] =
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
void BM_Counters_kAvgIterationsRate(benchmark::State &state)
{
for (auto _ : state)
{
// This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations());
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
"%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
{"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
{"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 0,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bar\": %float,$", MR_Next},
{"\"foo\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
"%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const& e) {
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
void CheckAvgIterationsRate(Results const &e)
{
double its = e.NumIterations();
double t = e.DurationCPUTime(); // this (and not real time) is the time used
// check that the values are within 0.1% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
&CheckAvgIterationsRate);
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", &CheckAvgIterationsRate);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -8,161 +8,149 @@
// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //
void BM_Counters_Thousands(benchmark::State& state) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
{"t0_1000000DefaultBase",
bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
{"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
{"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1000)},
{"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
benchmark::Counter::OneK::kIs1024)},
});
void BM_Counters_Thousands(benchmark::State &state)
{
for (auto _ : state)
{
}
namespace bm = benchmark;
state.counters.insert({
{"t0_1000000DefaultBase", bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
{"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
{"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
{"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1000)},
{"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, benchmark::Counter::OneK::kIs1024)},
});
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
ADD_CASES(
TC_ConsoleOut,
{
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
"t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
"t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_mean %console_report "
"t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
"t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
"t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_median %console_report "
"t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
"t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
"t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
"]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
"t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_ConsoleOut, {
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
"t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2 %console_report "
"t0_1000000DefaultBase=1000k "
"t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
"t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_mean %console_report "
"t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
"t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
"t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_median %console_report "
"t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
"t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
"t4_1048576Base1024=1024k$"},
{"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
"]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
"t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
{"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
{"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
{"}", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
"04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
"04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
"0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
"04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/"
"repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
"04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
{"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) {
if (e.name != "BM_Counters_Thousands/repeats:2")
return; // Do not check the aggregates!
void CheckThousands(Results const &e)
{
if (e.name != "BM_Counters_Thousands/repeats:2")
return; // Do not check the aggregates!
// check that the values are within 0.01% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
// check that the values are within 0.01% of the expected values
CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
@ -170,4 +158,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
int main(int argc, char *argv[])
{
RunOutputTests(argc, argv);
}

View File

@ -40,7 +40,8 @@
#include "gtest/internal/gtest-death-test-internal.h"
namespace testing {
namespace testing
{
// This flag controls the style of death tests. Valid values are "threadsafe",
// meaning that the death test child process will re-execute the test binary
@ -51,7 +52,8 @@ GTEST_DECLARE_string_(death_test_style);
#if GTEST_HAS_DEATH_TEST
namespace internal {
namespace internal
{
// Returns a Boolean value indicating whether the caller is currently
// executing in the context of the death test child process. Tools such as
@ -60,7 +62,7 @@ namespace internal {
// implementation of death tests. User code MUST NOT use it.
GTEST_API_ bool InDeathTestChild();
} // namespace internal
} // namespace internal
// The following macros are useful for writing death tests.
@ -165,51 +167,51 @@ GTEST_API_ bool InDeathTestChild();
// Asserts that a given statement causes the program to exit, with an
// integer exit status that satisfies predicate, and emitting error output
// that matches regex.
# define ASSERT_EXIT(statement, predicate, regex) \
GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
#define ASSERT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
// Like ASSERT_EXIT, but continues on to successive tests in the
// test suite, if any:
# define EXPECT_EXIT(statement, predicate, regex) \
GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
#define EXPECT_EXIT(statement, predicate, regex) GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
// Asserts that a given statement causes the program to exit, either by
// explicitly exiting with a nonzero exit code or being killed by a
// signal, and emitting error output that matches regex.
# define ASSERT_DEATH(statement, regex) \
ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
#define ASSERT_DEATH(statement, regex) ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
// Like ASSERT_DEATH, but continues on to successive tests in the
// test suite, if any:
# define EXPECT_DEATH(statement, regex) \
EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
#define EXPECT_DEATH(statement, regex) EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
// Tests that an exit code describes a normal exit with a given exit code.
class GTEST_API_ ExitedWithCode {
public:
explicit ExitedWithCode(int exit_code);
bool operator()(int exit_status) const;
private:
// No implementation - assignment is unsupported.
void operator=(const ExitedWithCode& other);
class GTEST_API_ ExitedWithCode
{
public:
explicit ExitedWithCode(int exit_code);
bool operator()(int exit_status) const;
const int exit_code_;
private:
// No implementation - assignment is unsupported.
void operator=(const ExitedWithCode &other);
const int exit_code_;
};
# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
#if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
// Tests that an exit code describes an exit due to termination by a
// given signal.
// GOOGLETEST_CM0006 DO NOT DELETE
class GTEST_API_ KilledBySignal {
public:
explicit KilledBySignal(int signum);
bool operator()(int exit_status) const;
private:
const int signum_;
class GTEST_API_ KilledBySignal
{
public:
explicit KilledBySignal(int signum);
bool operator()(int exit_status) const;
private:
const int signum_;
};
# endif // !GTEST_OS_WINDOWS
#endif // !GTEST_OS_WINDOWS
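For readers skimming this header, a minimal usage sketch of the macros and predicates declared above (the AbortIfNegative helper and the test names are hypothetical, and death tests must be supported on the target platform):

#include <cstdio>
#include <cstdlib>
#include "gtest/gtest.h"

// Hypothetical helper that terminates the process; it exists only so the
// death-test macros below have something to observe.
static void AbortIfNegative(int value) {
  if (value < 0) {
    std::fprintf(stderr, "negative value\n");
    std::exit(2);
  }
}

TEST(DeathTestSketch, ExitsOnNegativeInput) {
  // EXPECT_EXIT checks the exit status through the ExitedWithCode predicate
  // and matches stderr output against the given regex.
  EXPECT_EXIT(AbortIfNegative(-1), ::testing::ExitedWithCode(2), "negative value");
  // EXPECT_DEATH only requires an unsuccessful exit plus matching output.
  EXPECT_DEATH(AbortIfNegative(-5), "negative");
}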
// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
// The death testing framework causes this to have interesting semantics,
@ -254,24 +256,20 @@ class GTEST_API_ KilledBySignal {
// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
// }, "death");
//
# ifdef NDEBUG
#ifdef NDEBUG
# define EXPECT_DEBUG_DEATH(statement, regex) \
GTEST_EXECUTE_STATEMENT_(statement, regex)
#define EXPECT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)
# define ASSERT_DEBUG_DEATH(statement, regex) \
GTEST_EXECUTE_STATEMENT_(statement, regex)
#define ASSERT_DEBUG_DEATH(statement, regex) GTEST_EXECUTE_STATEMENT_(statement, regex)
# else
#else
# define EXPECT_DEBUG_DEATH(statement, regex) \
EXPECT_DEATH(statement, regex)
#define EXPECT_DEBUG_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
# define ASSERT_DEBUG_DEATH(statement, regex) \
ASSERT_DEATH(statement, regex)
#define ASSERT_DEBUG_DEATH(statement, regex) ASSERT_DEATH(statement, regex)
# endif // NDEBUG for EXPECT_DEBUG_DEATH
#endif // GTEST_HAS_DEATH_TEST
#endif // NDEBUG for EXPECT_DEBUG_DEATH
#endif // GTEST_HAS_DEATH_TEST
// This macro is used for implementing macros such as
// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
@ -308,18 +306,21 @@ class GTEST_API_ KilledBySignal {
// statement unconditionally returns or throws. The Message constructor at
// the end allows the syntax of streaming additional messages into the
// macro, for compilation compatibility with EXPECT_DEATH/ASSERT_DEATH.
# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (::testing::internal::AlwaysTrue()) { \
GTEST_LOG_(WARNING) \
<< "Death tests are not supported on this platform.\n" \
<< "Statement '" #statement "' cannot be verified."; \
} else if (::testing::internal::AlwaysFalse()) { \
::testing::internal::RE::PartialMatch(".*", (regex)); \
GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
terminator; \
} else \
::testing::Message()
#define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (::testing::internal::AlwaysTrue()) \
{ \
GTEST_LOG_(WARNING) << "Death tests are not supported on this platform.\n" \
<< "Statement '" #statement "' cannot be verified."; \
} \
else if (::testing::internal::AlwaysFalse()) \
{ \
::testing::internal::RE::PartialMatch(".*", (regex)); \
GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
terminator; \
} \
else \
::testing::Message()
// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
@ -327,17 +328,13 @@ class GTEST_API_ KilledBySignal {
// useful when you are combining death test assertions with normal test
// assertions in one test.
#if GTEST_HAS_DEATH_TEST
# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
EXPECT_DEATH(statement, regex)
# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
ASSERT_DEATH(statement, regex)
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) EXPECT_DEATH(statement, regex)
#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) ASSERT_DEATH(statement, regex)
#else
# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return)
#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return )
#endif
} // namespace testing
} // namespace testing
#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_

View File

@ -52,14 +52,14 @@
#include "gtest/internal/gtest-port.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
/* class A needs to have dll-interface to be used by clients of class B */)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
// Ensures that there is at least one operator<< in the global namespace.
// See Message& operator<<(...) below for why.
void operator<<(const testing::internal::Secret&, int);
void operator<<(const testing::internal::Secret &, int);
namespace testing {
namespace testing
{
// The Message class works like an ostream repeater.
//
@ -87,132 +87,142 @@ namespace testing {
// latter (it causes an access violation if you do). The Message
// class hides this difference by treating a NULL char pointer as
// "(null)".
class GTEST_API_ Message {
private:
// The type of basic IO manipulators (endl, ends, and flush) for
// narrow streams.
typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
class GTEST_API_ Message
{
private:
// The type of basic IO manipulators (endl, ends, and flush) for
// narrow streams.
typedef std::ostream &(*BasicNarrowIoManip)(std::ostream &);
public:
// Constructs an empty Message.
Message();
public:
// Constructs an empty Message.
Message();
// Copy constructor.
Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT
*ss_ << msg.GetString();
}
// Constructs a Message from a C-string.
explicit Message(const char* str) : ss_(new ::std::stringstream) {
*ss_ << str;
}
// Streams a non-pointer value to this object.
template <typename T>
inline Message& operator <<(const T& val) {
// Some libraries overload << for STL containers. These
// overloads are defined in the global namespace instead of ::std.
//
// C++'s symbol lookup rule (i.e. Koenig lookup) says that these
// overloads are visible in either the std namespace or the global
// namespace, but not other namespaces, including the testing
// namespace which Google Test's Message class is in.
//
// To allow STL containers (and other types that have a << operator
// defined in the global namespace) to be used in Google Test
// assertions, testing::Message must access the custom << operator
// from the global namespace. With this using declaration,
// overloads of << defined in the global namespace and those
// visible via Koenig lookup are both exposed in this function.
using ::operator <<;
*ss_ << val;
return *this;
}
// Streams a pointer value to this object.
//
// This function is an overload of the previous one. When you
// stream a pointer to a Message, this definition will be used as it
// is more specialized. (The C++ Standard, section
// [temp.func.order].) If you stream a non-pointer, then the
// previous definition will be used.
//
// The reason for this overload is that streaming a NULL pointer to
// ostream is undefined behavior. Depending on the compiler, you
// may get "0", "(nil)", "(null)", or an access violation. To
// ensure consistent result across compilers, we always treat NULL
// as "(null)".
template <typename T>
inline Message& operator <<(T* const& pointer) { // NOLINT
if (pointer == nullptr) {
*ss_ << "(null)";
} else {
*ss_ << pointer;
// Copy constructor.
Message(const Message &msg) : ss_(new ::std::stringstream)
{ // NOLINT
*ss_ << msg.GetString();
}
return *this;
}
// Since the basic IO manipulators are overloaded for both narrow
// and wide streams, we have to provide this specialized definition
// of operator <<, even though its body is the same as the
// templatized version above. Without this definition, streaming
// endl or other basic IO manipulators to Message will confuse the
// compiler.
Message& operator <<(BasicNarrowIoManip val) {
*ss_ << val;
return *this;
}
// Constructs a Message from a C-string.
explicit Message(const char *str) : ss_(new ::std::stringstream)
{
*ss_ << str;
}
// Instead of 1/0, we want to see true/false for bool values.
Message& operator <<(bool b) {
return *this << (b ? "true" : "false");
}
// Streams a non-pointer value to this object.
template <typename T> inline Message &operator<<(const T &val)
{
// Some libraries overload << for STL containers. These
// overloads are defined in the global namespace instead of ::std.
//
// C++'s symbol lookup rule (i.e. Koenig lookup) says that these
// overloads are visible in either the std namespace or the global
// namespace, but not other namespaces, including the testing
// namespace which Google Test's Message class is in.
//
// To allow STL containers (and other types that have a << operator
// defined in the global namespace) to be used in Google Test
// assertions, testing::Message must access the custom << operator
// from the global namespace. With this using declaration,
// overloads of << defined in the global namespace and those
// visible via Koenig lookup are both exposed in this function.
using ::operator<<;
*ss_ << val;
return *this;
}
// These two overloads allow streaming a wide C string to a Message
// using the UTF-8 encoding.
Message& operator <<(const wchar_t* wide_c_str);
Message& operator <<(wchar_t* wide_c_str);
// Streams a pointer value to this object.
//
// This function is an overload of the previous one. When you
// stream a pointer to a Message, this definition will be used as it
// is more specialized. (The C++ Standard, section
// [temp.func.order].) If you stream a non-pointer, then the
// previous definition will be used.
//
// The reason for this overload is that streaming a NULL pointer to
// ostream is undefined behavior. Depending on the compiler, you
// may get "0", "(nil)", "(null)", or an access violation. To
// ensure consistent result across compilers, we always treat NULL
// as "(null)".
template <typename T> inline Message &operator<<(T *const &pointer)
{ // NOLINT
if (pointer == nullptr)
{
*ss_ << "(null)";
}
else
{
*ss_ << pointer;
}
return *this;
}
// Since the basic IO manipulators are overloaded for both narrow
// and wide streams, we have to provide this specialized definition
// of operator <<, even though its body is the same as the
// templatized version above. Without this definition, streaming
// endl or other basic IO manipulators to Message will confuse the
// compiler.
Message &operator<<(BasicNarrowIoManip val)
{
*ss_ << val;
return *this;
}
// Instead of 1/0, we want to see true/false for bool values.
Message &operator<<(bool b)
{
return *this << (b ? "true" : "false");
}
// These two overloads allow streaming a wide C string to a Message
// using the UTF-8 encoding.
Message &operator<<(const wchar_t *wide_c_str);
Message &operator<<(wchar_t *wide_c_str);
#if GTEST_HAS_STD_WSTRING
// Converts the given wide string to a narrow string using the UTF-8
// encoding, and streams the result to this Message object.
Message& operator <<(const ::std::wstring& wstr);
#endif // GTEST_HAS_STD_WSTRING
// Converts the given wide string to a narrow string using the UTF-8
// encoding, and streams the result to this Message object.
Message &operator<<(const ::std::wstring &wstr);
#endif // GTEST_HAS_STD_WSTRING
// Gets the text streamed to this object so far as an std::string.
// Each '\0' character in the buffer is replaced with "\\0".
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
std::string GetString() const;
// Gets the text streamed to this object so far as an std::string.
// Each '\0' character in the buffer is replaced with "\\0".
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
std::string GetString() const;
private:
// We'll hold the text streamed to this object here.
const std::unique_ptr< ::std::stringstream> ss_;
private:
// We'll hold the text streamed to this object here.
const std::unique_ptr<::std::stringstream> ss_;
// We declare (but don't implement) this to prevent the compiler
// from implementing the assignment operator.
void operator=(const Message&);
// We declare (but don't implement) this to prevent the compiler
// from implementing the assignment operator.
void operator=(const Message &);
};
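A small, hypothetical usage sketch of the Message class described above, showing the bool and NULL-pointer formatting that its comments document:

#include <iostream>
#include "gtest/gtest-message.h"

int main() {
  const char* missing = nullptr;
  ::testing::Message msg;
  msg << "value=" << 42 << ", flag=" << true << ", ptr=" << missing;
  // Prints: value=42, flag=true, ptr=(null)
  // Booleans are rendered as true/false and NULL char pointers as "(null)".
  std::cout << msg.GetString() << std::endl;
  return 0;
}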
// Streams a Message to an ostream.
inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
return os << sb.GetString();
inline std::ostream &operator<<(std::ostream &os, const Message &sb)
{
return os << sb.GetString();
}
namespace internal {
namespace internal
{
// Converts a streamable value to an std::string. A NULL pointer is
// converted to "(null)". When the input value is a ::string,
// ::std::string, ::wstring, or ::std::wstring object, each NUL
// character in it is replaced with "\\0".
template <typename T>
std::string StreamableToString(const T& streamable) {
return (Message() << streamable).GetString();
template <typename T> std::string StreamableToString(const T &streamable)
{
return (Message() << streamable).GetString();
}
} // namespace internal
} // namespace testing
GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_

View File

@ -36,7 +36,6 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
// Value-parameterized tests allow you to test your code with different
// parameters without writing multiple copies of the same test.
//
@ -172,7 +171,7 @@ TEST_P(DerivedTest, DoesBlah) {
EXPECT_TRUE(foo.Blah(GetParam()));
}
#endif // 0
#include <iterator>
#include <utility>
@ -181,7 +180,8 @@ TEST_P(DerivedTest, DoesBlah) {
#include "gtest/internal/gtest-param-util.h"
#include "gtest/internal/gtest-port.h"
namespace testing {
namespace testing
{
// Functions producing parameter generators.
//
@ -225,15 +225,14 @@ namespace testing {
// * Condition start < end must be satisfied in order for resulting sequences
// to contain any elements.
//
template <typename T, typename IncrementT>
internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
return internal::ParamGenerator<T>(
new internal::RangeGenerator<T, IncrementT>(start, end, step));
template <typename T, typename IncrementT> internal::ParamGenerator<T> Range(T start, T end, IncrementT step)
{
return internal::ParamGenerator<T>(new internal::RangeGenerator<T, IncrementT>(start, end, step));
}
template <typename T>
internal::ParamGenerator<T> Range(T start, T end) {
return Range(start, end, 1);
template <typename T> internal::ParamGenerator<T> Range(T start, T end)
{
return Range(start, end, 1);
}
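// A minimal usage sketch of Range(); MyParamTest is a hypothetical fixture
// derived from ::testing::TestWithParam<int>:
//
//   INSTANTIATE_TEST_SUITE_P(EvenNumbers, MyParamTest,
//                            ::testing::Range(0, 10, 2));
//   // Generates the parameters 0, 2, 4, 6, 8; the end value 10 is excluded.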
// ValuesIn() function allows generation of tests with parameters coming from
@ -292,23 +291,21 @@ internal::ParamGenerator<T> Range(T start, T end) {
// ValuesIn(l.begin(), l.end()));
//
template <typename ForwardIterator>
internal::ParamGenerator<
typename std::iterator_traits<ForwardIterator>::value_type>
ValuesIn(ForwardIterator begin, ForwardIterator end) {
typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType;
return internal::ParamGenerator<ParamType>(
new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
internal::ParamGenerator<typename std::iterator_traits<ForwardIterator>::value_type> ValuesIn(ForwardIterator begin,
ForwardIterator end)
{
typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType;
return internal::ParamGenerator<ParamType>(new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
}
template <typename T, size_t N>
internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
return ValuesIn(array, array + N);
template <typename T, size_t N> internal::ParamGenerator<T> ValuesIn(const T (&array)[N])
{
return ValuesIn(array, array + N);
}
template <class Container>
internal::ParamGenerator<typename Container::value_type> ValuesIn(
const Container& container) {
return ValuesIn(container.begin(), container.end());
template <class Container> internal::ParamGenerator<typename Container::value_type> ValuesIn(const Container &container)
{
return ValuesIn(container.begin(), container.end());
}
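// A minimal usage sketch of the container overload of ValuesIn();
// StringParamTest is a hypothetical fixture derived from
// ::testing::TestWithParam<std::string>, and kInputs has static lifetime so
// it outlives the generated tests:
//
//   const std::vector<std::string> kInputs = {"cat", "dog", "fish"};
//   INSTANTIATE_TEST_SUITE_P(Animals, StringParamTest,
//                            ::testing::ValuesIn(kInputs));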
// Values() allows generating tests from explicitly specified list of
@ -331,9 +328,9 @@ internal::ParamGenerator<typename Container::value_type> ValuesIn(
// INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
//
//
template <typename... T>
internal::ValueArray<T...> Values(T... v) {
return internal::ValueArray<T...>(std::move(v)...);
template <typename... T> internal::ValueArray<T...> Values(T... v)
{
return internal::ValueArray<T...>(std::move(v)...);
}
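// A minimal usage sketch of Values(); MyParamTest is a hypothetical fixture
// derived from ::testing::TestWithParam<int>:
//
//   INSTANTIATE_TEST_SUITE_P(SmallPrimes, MyParamTest,
//                            ::testing::Values(2, 3, 5, 7));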
// Bool() allows generating tests with parameters in a set of (false, true).
@ -356,8 +353,9 @@ internal::ValueArray<T...> Values(T... v) {
// }
// INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool());
//
inline internal::ParamGenerator<bool> Bool() {
return Values(false, true);
inline internal::ParamGenerator<bool> Bool()
{
return Values(false, true);
}
// Combine() allows the user to combine two or more sequences to produce
@ -406,39 +404,38 @@ inline internal::ParamGenerator<bool> Bool() {
// INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest,
// Combine(Bool(), Bool()));
//
template <typename... Generator>
internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {
return internal::CartesianProductHolder<Generator...>(g...);
template <typename... Generator> internal::CartesianProductHolder<Generator...> Combine(const Generator &...g)
{
return internal::CartesianProductHolder<Generator...>(g...);
}
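// A minimal usage sketch of Combine(); CombinedTest is a hypothetical fixture
// derived from ::testing::TestWithParam<std::tuple<int, bool>>:
//
//   INSTANTIATE_TEST_SUITE_P(SizeAndFlag, CombinedTest,
//                            ::testing::Combine(::testing::Values(1, 2, 3),
//                                               ::testing::Bool()));
//   // Yields all six (int, bool) combinations as GetParam() tuples.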
#define TEST_P(test_suite_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \
: public test_suite_name { \
public: \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {} \
virtual void TestBody(); \
\
private: \
static int AddToRegistry() { \
::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>( \
#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestPattern( \
GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \
new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_( \
test_suite_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \
test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_suite_name, \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
#define TEST_P(test_suite_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) : public test_suite_name \
{ \
public: \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() \
{ \
} \
virtual void TestBody(); \
\
private: \
static int AddToRegistry() \
{ \
::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestPattern( \
GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \
new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
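// A minimal end-to-end sketch of TEST_P; ParityTest and the parameter values
// are hypothetical:
//
//   class ParityTest : public ::testing::TestWithParam<int> {};
//
//   TEST_P(ParityTest, IsNonNegative) {
//     EXPECT_GE(GetParam(), 0);
//   }
//
//   INSTANTIATE_TEST_SUITE_P(SmallInts, ParityTest, ::testing::Values(0, 1, 2));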
// The last argument to INSTANTIATE_TEST_SUITE_P allows the user to specify a
// generator and an optional function or functor that generates custom test name
@ -457,47 +454,40 @@ internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {
#define GTEST_GET_FIRST_(first, ...) first
#define GTEST_GET_SECOND_(first, second, ...) second
#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \
static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
gtest_##prefix##test_suite_name##_EvalGenerator_() { \
return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \
} \
static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \
const ::testing::TestParamInfo<test_suite_name::ParamType>& info) { \
if (::testing::internal::AlwaysFalse()) { \
::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, \
::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
DUMMY_PARAM_))); \
auto t = std::make_tuple(__VA_ARGS__); \
static_assert(std::tuple_size<decltype(t)>::value <= 2, \
"Too Many Args!"); \
} \
return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, \
::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
DUMMY_PARAM_))))(info); \
} \
static int gtest_##prefix##test_suite_name##_dummy_ \
GTEST_ATTRIBUTE_UNUSED_ = \
::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>( \
#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestSuiteInstantiation( \
#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \
&gtest_##prefix##test_suite_name##_EvalGenerateName_, \
__FILE__, __LINE__)
#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \
static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
gtest_##prefix##test_suite_name##_EvalGenerator_() \
{ \
return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \
} \
static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \
const ::testing::TestParamInfo<test_suite_name::ParamType> &info) \
{ \
if (::testing::internal::AlwaysFalse()) \
{ \
::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))); \
auto t = std::make_tuple(__VA_ARGS__); \
static_assert(std::tuple_size<decltype(t)>::value <= 2, "Too Many Args!"); \
} \
return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \
__VA_ARGS__, ::testing::internal::DefaultParamName<test_suite_name::ParamType>, DUMMY_PARAM_))))(info); \
} \
static int gtest_##prefix##test_suite_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
::testing::UnitTest::GetInstance() \
->parameterized_test_registry() \
.GetTestSuitePatternHolder<test_suite_name>(#test_suite_name, \
::testing::internal::CodeLocation(__FILE__, __LINE__)) \
->AddTestSuiteInstantiation(#prefix, &gtest_##prefix##test_suite_name##_EvalGenerator_, \
&gtest_##prefix##test_suite_name##_EvalGenerateName_, __FILE__, __LINE__)
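// A minimal sketch of the optional name-generator argument described above;
// ParityTest is the hypothetical fixture from the earlier sketch, and any
// callable taking a TestParamInfo and returning std::string should work:
//
//   INSTANTIATE_TEST_SUITE_P(
//       Named, ParityTest, ::testing::Values(0, 1),
//       [](const ::testing::TestParamInfo<int>& info) {
//         return "value_" + std::to_string(info.param);
//       });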
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TEST_CASE_P \
static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), \
""); \
INSTANTIATE_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TEST_CASE_P \
static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), ""); \
INSTANTIATE_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
} // namespace testing
#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_

View File

@ -38,10 +38,10 @@
#include "gtest/gtest.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
/* class A needs to have dll-interface to be used by clients of class B */)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
namespace testing {
namespace testing
{
// This helper class can be used to mock out Google Test failure reporting
// so that we can test Google Test or code that builds on Google Test.
@ -52,71 +52,73 @@ namespace testing {
// generated in the same thread that created this object or it can intercept
// all generated failures. The scope of this mock object can be controlled with
// the second argument to the two arguments constructor.
class GTEST_API_ ScopedFakeTestPartResultReporter
: public TestPartResultReporterInterface {
public:
// The two possible mocking modes of this object.
enum InterceptMode {
INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
INTERCEPT_ALL_THREADS // Intercepts all failures.
};
class GTEST_API_ ScopedFakeTestPartResultReporter : public TestPartResultReporterInterface
{
public:
// The two possible mocking modes of this object.
enum InterceptMode
{
INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
INTERCEPT_ALL_THREADS // Intercepts all failures.
};
// The c'tor sets this object as the test part result reporter used
// by Google Test. The 'result' parameter specifies where to report the
// results. This reporter will only catch failures generated in the current
// thread. DEPRECATED
explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
// The c'tor sets this object as the test part result reporter used
// by Google Test. The 'result' parameter specifies where to report the
// results. This reporter will only catch failures generated in the current
// thread. DEPRECATED
explicit ScopedFakeTestPartResultReporter(TestPartResultArray *result);
// Same as above, but you can choose the interception scope of this object.
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
TestPartResultArray* result);
// Same as above, but you can choose the interception scope of this object.
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, TestPartResultArray *result);
// The d'tor restores the previous test part result reporter.
~ScopedFakeTestPartResultReporter() override;
// The d'tor restores the previous test part result reporter.
~ScopedFakeTestPartResultReporter() override;
// Appends the TestPartResult object to the TestPartResultArray
// received in the constructor.
//
// This method is from the TestPartResultReporterInterface
// interface.
void ReportTestPartResult(const TestPartResult& result) override;
// Appends the TestPartResult object to the TestPartResultArray
// received in the constructor.
//
// This method is from the TestPartResultReporterInterface
// interface.
void ReportTestPartResult(const TestPartResult &result) override;
private:
void Init();
private:
void Init();
const InterceptMode intercept_mode_;
TestPartResultReporterInterface* old_reporter_;
TestPartResultArray* const result_;
const InterceptMode intercept_mode_;
TestPartResultReporterInterface *old_reporter_;
TestPartResultArray *const result_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
};
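// A minimal usage sketch, e.g. inside a test that exercises failure handling;
// the streamed message text is hypothetical:
//
//   ::testing::TestPartResultArray failures;
//   {
//     ::testing::ScopedFakeTestPartResultReporter reporter(
//         ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
//         &failures);
//     ADD_FAILURE() << "captured, not reported";
//   }
//   EXPECT_EQ(1, failures.size());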
namespace internal {
namespace internal
{
// A helper class for implementing EXPECT_FATAL_FAILURE() and
// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
// TestPartResultArray contains exactly one failure that has the given
// type and contains the given substring. If that's not the case, a
// non-fatal failure will be generated.
class GTEST_API_ SingleFailureChecker {
public:
// The constructor remembers the arguments.
SingleFailureChecker(const TestPartResultArray* results,
TestPartResult::Type type, const std::string& substr);
~SingleFailureChecker();
private:
const TestPartResultArray* const results_;
const TestPartResult::Type type_;
const std::string substr_;
class GTEST_API_ SingleFailureChecker
{
public:
// The constructor remembers the arguments.
SingleFailureChecker(const TestPartResultArray *results, TestPartResult::Type type, const std::string &substr);
~SingleFailureChecker();
GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
private:
const TestPartResultArray *const results_;
const TestPartResult::Type type_;
const std::string substr_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
};
} // namespace internal
} // namespace testing
GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// A set of macros for testing Google Test assertions or code that's expected
// to generate Google Test fatal failures. It verifies that the given
@ -141,39 +143,47 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// helper macro, due to some peculiarity in how the preprocessor
// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
// gtest_unittest.cc will fail to compile if we do that.
#define EXPECT_FATAL_FAILURE(statement, substr) \
do { \
class GTestExpectFatalFailureHelper {\
public:\
static void Execute() { statement; }\
};\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
GTestExpectFatalFailureHelper::Execute();\
}\
} while (::testing::internal::AlwaysFalse())
#define EXPECT_FATAL_FAILURE(statement, substr) \
do \
{ \
class GTestExpectFatalFailureHelper \
{ \
public: \
static void Execute() \
{ \
statement; \
} \
}; \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
::testing::TestPartResult::kFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
GTestExpectFatalFailureHelper::Execute(); \
} \
} while (::testing::internal::AlwaysFalse())
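// A minimal usage sketch of EXPECT_FATAL_FAILURE; the helper function and
// message text are hypothetical:
//
//   static void FailsFatally() { ASSERT_EQ(1, 2) << "values differ"; }
//
//   TEST(FailureSpiTest, CatchesFatalFailure) {
//     EXPECT_FATAL_FAILURE(FailsFatally(), "values differ");
//   }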
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do { \
class GTestExpectFatalFailureHelper {\
public:\
static void Execute() { statement; }\
};\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ALL_THREADS, &gtest_failures);\
GTestExpectFatalFailureHelper::Execute();\
}\
} while (::testing::internal::AlwaysFalse())
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do \
{ \
class GTestExpectFatalFailureHelper \
{ \
public: \
static void Execute() \
{ \
statement; \
} \
}; \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker(&gtest_failures, \
::testing::TestPartResult::kFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
GTestExpectFatalFailureHelper::Execute(); \
} \
} while (::testing::internal::AlwaysFalse())
// A macro for testing Google Test assertions or code that's expected to
// generate Google Test non-fatal failures. It asserts that the given
@ -207,32 +217,36 @@ GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
// instead of
// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
// to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
do {\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
(substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
if (::testing::internal::AlwaysTrue()) { statement; }\
}\
} while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
do \
{ \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker( \
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures); \
if (::testing::internal::AlwaysTrue()) \
{ \
statement; \
} \
} \
} while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do {\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
(substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
&gtest_failures);\
if (::testing::internal::AlwaysTrue()) { statement; }\
}\
} while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do \
{ \
::testing::TestPartResultArray gtest_failures; \
::testing::internal::SingleFailureChecker gtest_checker( \
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, (substr)); \
{ \
::testing::ScopedFakeTestPartResultReporter gtest_reporter( \
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &gtest_failures); \
if (::testing::internal::AlwaysTrue()) \
{ \
statement; \
} \
} \
} while (::testing::internal::AlwaysFalse())
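// A minimal usage sketch of EXPECT_NONFATAL_FAILURE; the message text is
// hypothetical:
//
//   TEST(FailureSpiTest, CatchesNonFatalFailure) {
//     EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "tagged failure", "tagged");
//   }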
#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_

View File

@ -32,128 +32,163 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#include <iosfwd>
#include <vector>
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-string.h"
#include <iosfwd>
#include <vector>
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
/* class A needs to have dll-interface to be used by clients of class B */)
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 /* class A needs to have dll-interface to be used by clients of class B */)
namespace testing {
namespace testing
{
// A copyable object representing the result of a test part (i.e. an
// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
//
// Don't inherit from TestPartResult as its destructor is not virtual.
class GTEST_API_ TestPartResult {
public:
// The possible outcomes of a test part (i.e. an assertion or an
// explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
enum Type {
kSuccess, // Succeeded.
kNonFatalFailure, // Failed but the test can continue.
kFatalFailure, // Failed and the test should be terminated.
kSkip // Skipped.
};
class GTEST_API_ TestPartResult
{
public:
// The possible outcomes of a test part (i.e. an assertion or an
// explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
enum Type
{
kSuccess, // Succeeded.
kNonFatalFailure, // Failed but the test can continue.
kFatalFailure, // Failed and the test should be terminated.
kSkip // Skipped.
};
// C'tor. TestPartResult does NOT have a default constructor.
// Always use this constructor (with parameters) to create a
// TestPartResult object.
TestPartResult(Type a_type, const char* a_file_name, int a_line_number,
const char* a_message)
: type_(a_type),
file_name_(a_file_name == nullptr ? "" : a_file_name),
line_number_(a_line_number),
summary_(ExtractSummary(a_message)),
message_(a_message) {}
// C'tor. TestPartResult does NOT have a default constructor.
// Always use this constructor (with parameters) to create a
// TestPartResult object.
TestPartResult(Type a_type, const char *a_file_name, int a_line_number, const char *a_message)
: type_(a_type), file_name_(a_file_name == nullptr ? "" : a_file_name), line_number_(a_line_number),
summary_(ExtractSummary(a_message)), message_(a_message)
{
}
// Gets the outcome of the test part.
Type type() const { return type_; }
// Gets the outcome of the test part.
Type type() const
{
return type_;
}
// Gets the name of the source file where the test part took place, or
// NULL if it's unknown.
const char* file_name() const {
return file_name_.empty() ? nullptr : file_name_.c_str();
}
// Gets the name of the source file where the test part took place, or
// NULL if it's unknown.
const char *file_name() const
{
return file_name_.empty() ? nullptr : file_name_.c_str();
}
// Gets the line in the source file where the test part took place,
// or -1 if it's unknown.
int line_number() const { return line_number_; }
// Gets the line in the source file where the test part took place,
// or -1 if it's unknown.
int line_number() const
{
return line_number_;
}
// Gets the summary of the failure message.
const char* summary() const { return summary_.c_str(); }
// Gets the summary of the failure message.
const char *summary() const
{
return summary_.c_str();
}
// Gets the message associated with the test part.
const char* message() const { return message_.c_str(); }
// Gets the message associated with the test part.
const char *message() const
{
return message_.c_str();
}
// Returns true if and only if the test part was skipped.
bool skipped() const { return type_ == kSkip; }
// Returns true if and only if the test part was skipped.
bool skipped() const
{
return type_ == kSkip;
}
// Returns true if and only if the test part passed.
bool passed() const { return type_ == kSuccess; }
// Returns true if and only if the test part passed.
bool passed() const
{
return type_ == kSuccess;
}
// Returns true if and only if the test part non-fatally failed.
bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
// Returns true if and only if the test part non-fatally failed.
bool nonfatally_failed() const
{
return type_ == kNonFatalFailure;
}
// Returns true if and only if the test part fatally failed.
bool fatally_failed() const { return type_ == kFatalFailure; }
// Returns true if and only if the test part fatally failed.
bool fatally_failed() const
{
return type_ == kFatalFailure;
}
// Returns true if and only if the test part failed.
bool failed() const { return fatally_failed() || nonfatally_failed(); }
// Returns true if and only if the test part failed.
bool failed() const
{
return fatally_failed() || nonfatally_failed();
}
private:
Type type_;
private:
Type type_;
// Gets the summary of the failure message by omitting the stack
// trace in it.
static std::string ExtractSummary(const char* message);
// Gets the summary of the failure message by omitting the stack
// trace in it.
static std::string ExtractSummary(const char *message);
// The name of the source file where the test part took place, or
// "" if the source file is unknown.
std::string file_name_;
// The line in the source file where the test part took place, or -1
// if the line number is unknown.
int line_number_;
std::string summary_; // The test failure summary.
std::string message_; // The test failure message.
// The name of the source file where the test part took place, or
// "" if the source file is unknown.
std::string file_name_;
// The line in the source file where the test part took place, or -1
// if the line number is unknown.
int line_number_;
std::string summary_; // The test failure summary.
std::string message_; // The test failure message.
};
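// A minimal sketch of reading a TestPartResult; PrintResult is a hypothetical
// helper (results are typically captured via gtest-spi.h, and this needs
// <iostream>):
//
//   void PrintResult(const ::testing::TestPartResult& r) {
//     if (!r.failed()) return;
//     std::cout << (r.fatally_failed() ? "[FATAL]    " : "[NONFATAL] ")
//               << (r.file_name() ? r.file_name() : "<unknown file>") << ":"
//               << r.line_number() << ": " << r.summary() << "\n";
//   }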
// Prints a TestPartResult object.
std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
std::ostream &operator<<(std::ostream &os, const TestPartResult &result);
// An array of TestPartResult objects.
//
// Don't inherit from TestPartResultArray as its destructor is not
// virtual.
class GTEST_API_ TestPartResultArray {
public:
TestPartResultArray() {}
class GTEST_API_ TestPartResultArray
{
public:
TestPartResultArray()
{
}
// Appends the given TestPartResult to the array.
void Append(const TestPartResult& result);
// Appends the given TestPartResult to the array.
void Append(const TestPartResult &result);
// Returns the TestPartResult at the given index (0-based).
const TestPartResult& GetTestPartResult(int index) const;
// Returns the TestPartResult at the given index (0-based).
const TestPartResult &GetTestPartResult(int index) const;
// Returns the number of TestPartResult objects in the array.
int size() const;
// Returns the number of TestPartResult objects in the array.
int size() const;
private:
std::vector<TestPartResult> array_;
private:
std::vector<TestPartResult> array_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
};
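// A minimal sketch of iterating a TestPartResultArray; "failures" would be
// filled by a ScopedFakeTestPartResultReporter from gtest-spi.h:
//
//   ::testing::TestPartResultArray failures;
//   // ... run code under a ScopedFakeTestPartResultReporter ...
//   for (int i = 0; i < failures.size(); ++i) {
//     const ::testing::TestPartResult& r = failures.GetTestPartResult(i);
//     (void)r;  // inspect r.type(), r.summary(), etc.
//   }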
// This interface knows how to report a test part result.
class GTEST_API_ TestPartResultReporterInterface {
public:
virtual ~TestPartResultReporterInterface() {}
class GTEST_API_ TestPartResultReporterInterface
{
public:
virtual ~TestPartResultReporterInterface()
{
}
virtual void ReportTestPartResult(const TestPartResult& result) = 0;
virtual void ReportTestPartResult(const TestPartResult &result) = 0;
};
namespace internal {
namespace internal
{
// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
// statement generates new fatal failures. To do so it registers itself as the
@ -161,24 +196,28 @@ namespace internal {
// reported, it only delegates the reporting to the former result reporter.
// The original result reporter is restored in the destructor.
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
class GTEST_API_ HasNewFatalFailureHelper
: public TestPartResultReporterInterface {
public:
HasNewFatalFailureHelper();
~HasNewFatalFailureHelper() override;
void ReportTestPartResult(const TestPartResult& result) override;
bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
private:
bool has_new_fatal_failure_;
TestPartResultReporterInterface* original_reporter_;
class GTEST_API_ HasNewFatalFailureHelper : public TestPartResultReporterInterface
{
public:
HasNewFatalFailureHelper();
~HasNewFatalFailureHelper() override;
void ReportTestPartResult(const TestPartResult &result) override;
bool has_new_fatal_failure() const
{
return has_new_fatal_failure_;
}
GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
private:
bool has_new_fatal_failure_;
TestPartResultReporterInterface *original_reporter_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
};
} // namespace internal
} // namespace testing
GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_

View File

@ -27,7 +27,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// GOOGLETEST_CM0001 DO NOT DELETE
#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
@ -101,7 +100,7 @@ TYPED_TEST(FooTest, HasPropertyA) { ... }
// };
// TYPED_TEST_SUITE(FooTest, MyTypes, MyTypeNames);
#endif // 0
// Type-parameterized tests are abstract test patterns parameterized
// by a type. Compared with typed tests, type-parameterized tests
@ -168,7 +167,7 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// generate custom names.
// INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes, MyTypeNames);
#endif // 0
#include "gtest/internal/gtest-port.h"
#include "gtest/internal/gtest-type-util.h"
@ -185,50 +184,38 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
// Expands to the name of the typedef for the NameGenerator, responsible for
// creating the suffixes of the name.
#define GTEST_NAME_GENERATOR_(TestSuiteName) \
gtest_type_params_##TestSuiteName##_NameGenerator
#define GTEST_NAME_GENERATOR_(TestSuiteName) gtest_type_params_##TestSuiteName##_NameGenerator
#define TYPED_TEST_SUITE(CaseName, Types, ...) \
typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_( \
CaseName); \
typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type \
GTEST_NAME_GENERATOR_(CaseName)
#define TYPED_TEST_SUITE(CaseName, Types, ...) \
typedef ::testing::internal::TypeList<Types>::type GTEST_TYPE_PARAMS_(CaseName); \
typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type GTEST_NAME_GENERATOR_(CaseName)
# define TYPED_TEST(CaseName, TestName) \
template <typename gtest_TypeParam_> \
class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
: public CaseName<gtest_TypeParam_> { \
private: \
typedef CaseName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \
}; \
static bool gtest_##CaseName##_##TestName##_registered_ \
GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTest< \
CaseName, \
::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, \
TestName)>, \
GTEST_TYPE_PARAMS_( \
CaseName)>::Register("", \
::testing::internal::CodeLocation( \
__FILE__, __LINE__), \
#CaseName, #TestName, 0, \
::testing::internal::GenerateNames< \
GTEST_NAME_GENERATOR_(CaseName), \
GTEST_TYPE_PARAMS_(CaseName)>()); \
template <typename gtest_TypeParam_> \
void GTEST_TEST_CLASS_NAME_(CaseName, \
TestName)<gtest_TypeParam_>::TestBody()
#define TYPED_TEST(CaseName, TestName) \
template <typename gtest_TypeParam_> \
class GTEST_TEST_CLASS_NAME_(CaseName, TestName) : public CaseName<gtest_TypeParam_> \
{ \
private: \
typedef CaseName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \
}; \
static bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTest< \
CaseName, ::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
GTEST_TYPE_PARAMS_(CaseName)>:: \
Register( \
"", ::testing::internal::CodeLocation(__FILE__, __LINE__), #CaseName, #TestName, 0, \
::testing::internal::GenerateNames<GTEST_NAME_GENERATOR_(CaseName), GTEST_TYPE_PARAMS_(CaseName)>()); \
template <typename gtest_TypeParam_> void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
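// A minimal sketch of the typed-test macros above; the fixture and type list
// are hypothetical:
//
//   template <typename T>
//   class ContainerTest : public ::testing::Test {};
//
//   using MyContainerTypes = ::testing::Types<std::vector<int>, std::list<int>>;
//   TYPED_TEST_SUITE(ContainerTest, MyContainerTypes);
//
//   TYPED_TEST(ContainerTest, StartsEmpty) {
//     TypeParam c;
//     EXPECT_TRUE(c.empty());
//   }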
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_CASE \
static_assert(::testing::internal::TypedTestCaseIsDeprecated(), ""); \
TYPED_TEST_SUITE
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_CASE \
static_assert(::testing::internal::TypedTestCaseIsDeprecated(), ""); \
TYPED_TEST_SUITE
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#endif // GTEST_HAS_TYPED_TEST
// Implements type-parameterized tests.
@ -245,86 +232,73 @@ INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
//
// Expands to the name of the variable used to remember the names of
// the defined tests in the given test suite.
#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) \
gtest_typed_test_suite_p_state_##TestSuiteName##_
#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) gtest_typed_test_suite_p_state_##TestSuiteName##_
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
//
// Expands to the name of the variable used to remember the names of
// the registered tests in the given test suite.
#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) \
gtest_registered_test_names_##TestSuiteName##_
#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) gtest_registered_test_names_##TestSuiteName##_
// The variables defined in the type-parameterized test macros are
// static as typically these macros are used in a .h file that can be
// #included in multiple translation units linked together.
#define TYPED_TEST_SUITE_P(SuiteName) \
static ::testing::internal::TypedTestSuitePState \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
#define TYPED_TEST_SUITE_P(SuiteName) \
static ::testing::internal::TypedTestSuitePState GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_CASE_P \
static_assert(::testing::internal::TypedTestCase_P_IsDeprecated(), ""); \
TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_CASE_P \
static_assert(::testing::internal::TypedTestCase_P_IsDeprecated(), ""); \
TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define TYPED_TEST_P(SuiteName, TestName) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \
template <typename gtest_TypeParam_> \
class TestName : public SuiteName<gtest_TypeParam_> { \
private: \
typedef SuiteName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \
}; \
static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName( \
__FILE__, __LINE__, #SuiteName, #TestName); \
} \
template <typename gtest_TypeParam_> \
void GTEST_SUITE_NAMESPACE_( \
SuiteName)::TestName<gtest_TypeParam_>::TestBody()
#define TYPED_TEST_P(SuiteName, TestName) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
{ \
template <typename gtest_TypeParam_> class TestName : public SuiteName<gtest_TypeParam_> \
{ \
private: \
typedef SuiteName<gtest_TypeParam_> TestFixture; \
typedef gtest_TypeParam_ TypeParam; \
virtual void TestBody(); \
}; \
static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName(__FILE__, __LINE__, #SuiteName, #TestName); \
} \
template <typename gtest_TypeParam_> void GTEST_SUITE_NAMESPACE_(SuiteName)::TestName<gtest_TypeParam_>::TestBody()
#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \
typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
} \
static const char* const GTEST_REGISTERED_TEST_NAMES_( \
SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames( \
__FILE__, __LINE__, #__VA_ARGS__)
#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \
namespace GTEST_SUITE_NAMESPACE_(SuiteName) \
{ \
typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
} \
static const char *const GTEST_REGISTERED_TEST_NAMES_(SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames(__FILE__, __LINE__, #__VA_ARGS__)
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define REGISTER_TYPED_TEST_CASE_P \
static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), \
""); \
REGISTER_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define REGISTER_TYPED_TEST_CASE_P \
static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), ""); \
REGISTER_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \
static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTestSuite< \
SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
::testing::internal::TypeList<Types>::type>:: \
Register(#Prefix, \
::testing::internal::CodeLocation(__FILE__, __LINE__), \
&GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, \
GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
::testing::internal::GenerateNames< \
::testing::internal::NameGeneratorSelector< \
__VA_ARGS__>::type, \
::testing::internal::TypeList<Types>::type>())
#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \
static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \
::testing::internal::TypeParameterizedTestSuite<SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
::testing::internal::TypeList<Types>::type>:: \
Register(#Prefix, ::testing::internal::CodeLocation(__FILE__, __LINE__), \
&GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), #SuiteName, GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
::testing::internal::GenerateNames<::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type, \
::testing::internal::TypeList<Types>::type>())
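// A minimal sketch of the type-parameterized macros above; the fixture, test,
// and type list are hypothetical:
//
//   template <typename T>
//   class QueueLikeTest : public ::testing::Test {};
//
//   TYPED_TEST_SUITE_P(QueueLikeTest);
//
//   TYPED_TEST_P(QueueLikeTest, StartsEmpty) {
//     TypeParam q;
//     EXPECT_TRUE(q.empty());
//   }
//
//   REGISTER_TYPED_TEST_SUITE_P(QueueLikeTest, StartsEmpty);
//
//   using QueueTypes = ::testing::Types<std::deque<int>, std::list<int>>;
//   INSTANTIATE_TYPED_TEST_SUITE_P(My, QueueLikeTest, QueueTypes);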
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_CASE_P \
static_assert( \
::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
INSTANTIATE_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#define INSTANTIATE_TYPED_TEST_CASE_P \
static_assert(::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
INSTANTIATE_TYPED_TEST_SUITE_P
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
#endif // GTEST_HAS_TYPED_TEST_P
#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff